From acdef2fa59c1e077138f8e6695113cefee81e03a Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 6 Feb 2024 10:27:54 +0900 Subject: [PATCH 01/28] Add handling empty predicted tensor --- .../visual_prompters/zero_shot_segment_anything.py | 4 ++++ .../test_zero_shot_segment_anything.py | 13 ++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 14c8e5dd6f2..8321714d577 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -163,6 +163,10 @@ def _point_selection( point_coords = torch.where(mask_sim > threshold) fg_coords_scores = torch.stack(point_coords[::-1] + (mask_sim[point_coords],), dim=0).T + + # to handle empty tensor + len_fg_coords_scores = len(fg_coords_scores) + fg_coords_scores = F.pad(fg_coords_scores, (0, 0, 0, max(0, 1 - len_fg_coords_scores)), value=-1) ratio = self.image_size / original_size.max() width = (original_size[1] * ratio).to(torch.int64) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 4437fdc1f42..a03fa4b838a 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -114,18 +114,21 @@ def test_get_prompt_candidates(self, mocker) -> None: assert bg_coords == "bg_coords" @e2e_pytest_unit - def test_point_selection(self) -> None: + @pytest.mark.parametrize( + "mask_sim,expected", + [ + (torch.arange(0.1, 1.0, 0.1).reshape(3, 3), torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]])), + (torch.zeros(3, 3), torch.tensor([[-1, -1, -1]])) + ]) + def test_point_selection(self, mask_sim: torch.Tensor, expected: torch.Tensor) -> None: """Test _point_selection.""" - mask_sim = torch.arange(0.1, 1.0, 0.1).reshape(self.prompt_getter.image_size, self.prompt_getter.image_size) - points_scores, bg_coords = self.prompt_getter._point_selection( mask_sim=mask_sim, original_size=torch.tensor([self.prompt_getter.image_size, self.prompt_getter.image_size]), threshold=torch.tensor([[0.5]]), ) - assert torch.equal(points_scores, torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]])) - assert torch.equal(bg_coords, torch.tensor([[0, 0]])) + assert torch.equal(points_scores, expected) class TestZeroShotSegmentAnything: From d6978d56522fe63ff81eb6f4311262feda63e543 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 6 Feb 2024 13:37:50 +0900 Subject: [PATCH 02/28] Update image encoder --- .../models/encoders/sam_image_encoder.py | 23 ++------- .../visual_prompters/segment_anything.py | 35 ++++++------- .../models/encoders/test_sam_image_encoder.py | 51 +++++++------------ .../visual_prompters/test_segment_anything.py | 16 +++--- .../visual_prompting/test_helpers.py | 8 +-- 5 files changed, 46 insertions(+), 87 deletions(-) diff --git 
a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py index f823593d592..d56df51fa5f 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py @@ -5,7 +5,7 @@ # from omegaconf import DictConfig -from torch import Tensor, nn +from torch import nn from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.backbones import ( build_tiny_vit, @@ -19,26 +19,13 @@ class SAMImageEncoder(nn.Module): Args: config (DictConfig): Config for image encoder. """ - - def __init__(self, config: DictConfig): - super().__init__() + + def __new__(cls, config: DictConfig): if "tiny_vit" == config.backbone: - self.backbone = build_tiny_vit(config.image_size) + return build_tiny_vit(config.image_size) elif "vit" in config.backbone: - self.backbone = build_vit(config.backbone, config.image_size) + return build_vit(config.backbone, config.image_size) else: raise NotImplementedError( (f"{config.backbone} for image encoder of SAM is not implemented yet. " f"Use vit_b, l, or h.") ) - - def forward(self, images: Tensor) -> Tensor: - """Forward function of image encoder. - - Args: - images (Tensor): Input tensor. - - Returns: - image_embeddings (Tensor): Output tensor. - """ - image_embeddings = self.backbone(images) - return image_embeddings diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index 9581d21ab41..3e8f271394d 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -133,32 +133,26 @@ def set_metrics(self) -> None: ) ) - def load_checkpoint( - self, - state_dict: Optional[OrderedDict] = None, - revise_keys: List = [(r"^image_encoder.", r"image_encoder.backbone.")], - ) -> None: + def load_checkpoint(self, state_dict: Optional[OrderedDict] = None) -> None: """Load checkpoint for SAM. Args: state_dict (Optional[OrderedDict], optional): State dict of SAM. Defaults to None. - revise_keys (List, optional): List of tuples of regex patterns to revise keys of state_dict. - Defaults to [(r'^image_encoder.', r'image_encoder.backbone.')]. 
""" - - def replace_state_dict_keys(state_dict, revise_keys): - for p, r in revise_keys: - state_dict = OrderedDict( - { - re.sub(p, r, k) if re.search(p, k) and not re.search(r, k) else k: v - for k, v in state_dict.items() - } - ) - return state_dict - + def skip_unused_parameters(state_dict): + if self.config.model.backbone == "tiny_vit": + for key in [ + "image_encoder.norm_head.weight", + "image_encoder.norm_head.bias", + "image_encoder.head.weight", + "image_encoder.head.bias", + ]: + if key in state_dict: + state_dict.pop(key) + if state_dict: # state_dict from args.load_from - state_dict = replace_state_dict_keys(state_dict, revise_keys) + skip_unused_parameters(state_dict) self.load_state_dict(state_dict) elif self.config.model.checkpoint: if str(self.config.model.checkpoint).endswith(".ckpt"): @@ -172,7 +166,8 @@ def replace_state_dict_keys(state_dict, revise_keys): # load checkpoint from local with open(self.config.model.checkpoint, "rb") as f: state_dict = torch.load(f) - state_dict = replace_state_dict_keys(state_dict, revise_keys) + + skip_unused_parameters(state_dict) self.load_state_dict(state_dict, strict=False) ########################################################## diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py index e80137685d7..5c2c652ed2e 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py @@ -22,38 +22,21 @@ def forward(self, *args, **kwargs): class TestSAMImageEncoder: - @pytest.fixture(autouse=True) - def setup(self, mocker) -> None: - self.mocker_backbone = mocker.patch( - "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.encoders.sam_image_encoder.build_vit", - return_value=MockBackbone(), - ) - - self.base_config = DictConfig(dict(backbone="vit_b", image_size=1024)) - - @e2e_pytest_unit - @pytest.mark.parametrize("backbone", ["vit_b", "resnet"]) - def test_init(self, backbone: str): - """Test init.""" - self.mocker_backbone.reset_mock() - - config = self.base_config.copy() - config.update(dict(backbone=backbone)) - - if backbone == "resnet": - with pytest.raises(NotImplementedError): - sam_image_encoder = SAMImageEncoder(config) - else: - sam_image_encoder = SAMImageEncoder(config) - self.mocker_backbone.assert_called_once() - + @pytest.fixture() + def config(self, mocker) -> DictConfig: + return DictConfig(dict(image_size=1024)) + @e2e_pytest_unit - def test_forward(self, mocker): - """Test forward.""" - self.mocker_backbone.reset_mock() - - sam_image_encoder = SAMImageEncoder(self.base_config) - mocker_forward = mocker.patch.object(sam_image_encoder.backbone, "forward") - sam_image_encoder.forward(torch.Tensor([1.0])) - - mocker_forward.assert_called_once() + @pytest.mark.parametrize("backbone,expected", + [ + ("tiny_vit", "TinyViT"), + ("vit_b", "ViT"), + ], + ) + def test_new(self, config: DictConfig, backbone: str, expected: str) -> None: + """Test __new__.""" + config.update({"backbone": backbone}) + + sam_image_encoder = SAMImageEncoder(config) + + assert sam_image_encoder.__class__.__name__ == expected diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py 
b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index fed22e060c8..b8fe9e626bc 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -76,7 +76,7 @@ def test_set_models(self, mocker, backbone: str) -> None: # backbone == vit_b sam = SegmentAnything(config) - assert isinstance(sam.image_encoder, MockImageEncoder) + assert isinstance(sam.image_encoder, nn.Linear) assert isinstance(sam.prompt_encoder, MockPromptEncoder) assert isinstance(sam.mask_decoder, MockMaskDecoder) @@ -159,8 +159,8 @@ def test_set_metrics(self, mocker, loss_type: str): False, OrderedDict( [ - ("image_encoder.weight", Tensor([[0.0]])), - ("image_encoder.bias", Tensor([0.0])), + ("image_encoder.weight", torch.ones(4, 4)), + ("image_encoder.bias", torch.ones(4)), ("prompt_encoder.layer.weight", Tensor([[0.0]])), ("prompt_encoder.layer.bias", Tensor([0.0])), ("mask_decoder.layer.weight", Tensor([[0.0]])), @@ -172,8 +172,8 @@ def test_set_metrics(self, mocker, loss_type: str): True, OrderedDict( [ - ("image_encoder.backbone.weight", Tensor([[1.0]])), - ("image_encoder.backbone.bias", Tensor([1.0])), + ("image_encoder.weight", torch.ones(4, 4)), + ("image_encoder.bias", torch.ones(4)), ("prompt_encoder.layer.weight", Tensor([[1.0]])), ("prompt_encoder.layer.bias", Tensor([1.0])), ("mask_decoder.layer.weight", Tensor([[1.0]])), @@ -196,10 +196,8 @@ def test_load_checkpoint_with_state_dict(self, mocker, is_backbone_arg: bool, st sam_state_dict = sam.state_dict() for k, v in state_dict.items(): - if not is_backbone_arg: - k = k.replace("image_encoder", "image_encoder.backbone") assert k in sam_state_dict - assert v == sam_state_dict[k] + assert torch.all(v == sam_state_dict[k]) @e2e_pytest_unit def test_load_checkpoint_without_checkpoint(self, mocker): @@ -360,7 +358,7 @@ def test_mask_postprocessing(self, mocker) -> None: def test_forward_train(self) -> None: """Test forward.""" sam = SegmentAnything(config=self.base_config) - images = torch.zeros((1)) + images = torch.zeros((1, 3, 4, 4)) bboxes = torch.zeros((1)) results = sam.forward_train(images=images, bboxes=bboxes, points=None) diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index c1be0ae3c89..fb585e22a87 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -148,12 +148,8 @@ def __init__(self, use_mask: bool = False): class MockImageEncoder(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__() - self.backbone = nn.Linear(1, 1) - - def forward(self, *args, **kwargs): - return torch.ones((1, 2, 4, 4)) + def __new__(cls, *args, **kwargs): + return nn.Linear(4, 4) class MockPromptEncoder(nn.Module): From 61d85965628abea114a4d326ce5053c8fca64f0a Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 6 Feb 2024 17:57:44 +0900 Subject: [PATCH 03/28] Enable inner postprocessing --- .../visual_prompters/segment_anything.py | 86 ++++++------------- .../zero_shot_segment_anything.py | 26 +++--- .../visual_prompting/tasks/inference.py | 6 +- .../visual_prompting/tasks/openvino.py | 2 +- .../compressed_decoder.yml | 2 +- .../visual_prompters/test_segment_anything.py | 38 ++++---- .../test_zero_shot_segment_anything.py | 34 ++++---- 
.../visual_prompting/tasks/test_inference.py | 3 +- .../visual_prompting/tasks/test_openvino.py | 2 +- 9 files changed, 88 insertions(+), 111 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index 3e8f271394d..f6b59922baf 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -181,7 +181,7 @@ def forward( point_labels: Tensor, mask_input: Tensor, has_mask_input: Tensor, - # orig_size: Tensor, + orig_size: Tensor, ): """Forward method for SAM inference (export/deploy). @@ -223,18 +223,16 @@ def forward( if self.config.model.return_single_mask: masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) - return scores, masks - # TODO (sungchul): apply inner postprocessing - # upscaled_masks = self.mask_postprocessing(masks, orig_size[0]) + upscaled_masks = self.postprocess_masks(masks, self.config.model.image_size, orig_size[0]) - # if self.config.model.return_extra_metrics: - # stability_scores = self.calculate_stability_score( - # upscaled_masks, self.config.model.mask_threshold, self.config.model.stability_score_offset - # ) - # areas = (upscaled_masks > self.config.model.mask_threshold).sum(-1).sum(-1) - # return upscaled_masks, scores, stability_scores, areas, masks + if self.config.model.return_extra_metrics: + stability_scores = self.calculate_stability_score( + upscaled_masks, self.config.model.mask_threshold, self.config.model.stability_score_offset + ) + areas = (upscaled_masks > self.config.model.mask_threshold).sum(-1).sum(-1) + return upscaled_masks, scores, stability_scores, areas, masks - # return upscaled_masks, scores, masks + return upscaled_masks, scores, masks def _embed_points(self, point_coords: Tensor, point_labels: Tensor) -> Tensor: """Embed sparse input prompts. @@ -328,10 +326,10 @@ def select_masks(self, masks: Tensor, iou_preds: Tensor, num_points: int) -> Tup iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) return masks, iou_preds - - @staticmethod - def mask_postprocessing(masks: Tensor, input_size: int, orig_size: Tensor) -> Tensor: - """Postprocesses the predicted masks. + + @classmethod + def postprocess_masks(cls, masks: Tensor, input_size: int, orig_size: Tensor) -> Tensor: + """Postprocess the predicted masks. Args: masks (Tensor): A batch of predicted masks with shape Bx1xHxW. @@ -342,22 +340,20 @@ def mask_postprocessing(masks: Tensor, input_size: int, orig_size: Tensor) -> Te Returns: masks (Tensor): The postprocessed masks with shape Bx1xHxW. 
""" - - def resize_longest_image_size(input_image_size: Tensor, longest_side: int) -> Tensor: - scale = longest_side / torch.max(input_image_size) - transformed_size = scale * input_image_size - transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) - return transformed_size - masks = F.interpolate(masks, size=(input_size, input_size), mode="bilinear", align_corners=False) - prepadded_size = resize_longest_image_size(orig_size, input_size) - masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore + prepadded_size = cls.get_prepadded_size(cls, orig_size, input_size) + masks = masks[..., : prepadded_size[0], : prepadded_size[1]] orig_size = orig_size.to(torch.int64) h, w = orig_size[0], orig_size[1] - masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) - return masks + return F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) + + def get_prepadded_size(self, input_image_size: Tensor, longest_side: int) -> Tensor: + """Get pre-padded size.""" + scale = longest_side / torch.max(input_image_size) + transformed_size = scale * input_image_size + return torch.floor(transformed_size + 0.5).to(torch.int64) ###################################################### # forward for training/validation/prediction # @@ -437,9 +433,9 @@ def training_step(self, batch, batch_idx) -> Tensor: num_masks = sum(len(pred_mask) for pred_mask in pred_masks) for i, (pred_mask, gt_mask, iou_prediction) in enumerate(zip(pred_masks, gt_masks, iou_predictions)): pred_mask = self.postprocess_masks( - pred_mask, images.shape[2:], batch["padding"][i], batch["original_size"][i] + pred_mask, self.config.model.image_size, batch["original_size"][i] ) - pred_mask = pred_mask.sigmoid() + pred_mask = pred_mask.sigmoid().squeeze(1) self.train_metrics["train_IoU"].update(pred_mask, gt_mask) self.train_metrics["train_F1"].update(pred_mask, gt_mask) self.train_metrics["train_Dice"].update(pred_mask, gt_mask) @@ -495,9 +491,9 @@ def validation_step(self, batch, batch_idx) -> MetricCollection: pred_masks, _ = self.forward_train(images, bboxes, points) for i, (pred_mask, gt_mask) in enumerate(zip(pred_masks, gt_masks)): pred_mask = self.postprocess_masks( - pred_mask, images.shape[2:], batch["padding"][i], batch["original_size"][i] + pred_mask, self.config.model.image_size, batch["original_size"][i] ) - pred_mask = pred_mask.sigmoid() + pred_mask = pred_mask.sigmoid().squeeze(1) for k, v in self.val_metrics.items(): v.update(pred_mask, gt_mask) @@ -527,41 +523,15 @@ def predict_step(self, batch, batch_idx) -> Dict[str, Tensor]: masks: List[Tensor] = [] for i, pred_mask in enumerate(pred_masks): - mask = self.postprocess_masks(pred_mask, images.shape[2:], batch["padding"][i], batch["original_size"][i]) + mask = self.postprocess_masks(pred_mask, self.config.model.image_size, batch["original_size"][i]) if not self.config.model.return_logits: mask = (mask > self.config.model.mask_threshold).to(mask.dtype) else: mask = mask.sigmoid() - masks.append(mask) + masks.append(mask.squeeze(1)) return dict(masks=masks, iou_predictions=iou_predictions, path=batch["path"], labels=batch["labels"]) - @staticmethod - def postprocess_masks( - masks: Tensor, - input_size: Tuple[int, int], - padding: Union[Tuple[int, ...], Tensor], - original_size: Union[Tuple[int, int], Tensor], - ) -> Tensor: - """Remove padding and upscale masks to the original image size. - - Args: - masks (Tensor): Predicted masks from the mask_decoder with (N, 1, H/downsized_ratio, W/downsized_ratio). 
- input_size (tuple(int, int)): The size of the image input to the model, in (H, W) format. - Used to remove padding. - padding (tuple(int, int, int, int), Tensor): The padding applied to the image before input to the model, - in (left, top, right, bottom) format. - original_size (tuple(int, int), Tensor): The original size of the image before resizing - for input to the model, in (H, W) format. - - Returns: - (Tensor): Postprocessed masks in NxHxW format, where (H, W) is given by original_size. - """ - masks = F.interpolate(masks, input_size, mode="bilinear", align_corners=False) - masks = masks[..., : input_size[0] - padding[3], : input_size[1] - padding[2]] - masks = F.interpolate(masks, [int(o) for o in original_size], mode="bilinear", align_corners=False) - return masks.squeeze(1) - def configure_optimizers(self) -> optim: """Configure the optimizer for SAM. diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 8321714d577..4fceb645b39 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -132,7 +132,7 @@ def get_prompt_candidates( sim = self.reference_feats[label].to(device) @ target_feat sim = sim.reshape(1, 1, h_feat, w_feat) - sim = ZeroShotSegmentAnything.mask_postprocessing(sim, self.image_size, original_size[0]) + sim = ZeroShotSegmentAnything.postprocess_masks(sim, self.image_size, original_size[0]) threshold = (threshold == 0) * self.default_threshold_target + threshold points_scores, bg_coords = self._point_selection( @@ -321,7 +321,7 @@ def learn( image_embeddings=image_embeddings, point_coords=point_coords, point_labels=point_labels, - original_size=original_size, + original_size=original_size.unsqueeze(0), is_cascade=False, ) reference_prompt[masks] += 1 @@ -396,7 +396,7 @@ def infer( image_embeddings=image_embeddings, point_coords=point_coords, point_labels=point_labels, - original_size=original_size[0], + original_size=original_size, ) predicted_masks[label].append((mask * points_score[2]).detach().cpu()) used_points[label].append(points_score.detach().cpu()) @@ -463,7 +463,7 @@ def _predict_masks( elif is_cascade and i == 1: # Cascaded Post-refinement-1 - mask_input, masks = self._postprocess_masks(logits, scores, original_size, is_single=True) # noqa: F821 + mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) # noqa: F821 if masks.sum() == 0: return masks @@ -471,7 +471,7 @@ def _predict_masks( elif is_cascade and i == 2: # Cascaded Post-refinement-2 - mask_input, masks = self._postprocess_masks(logits, scores, original_size) # noqa: F821 + mask_input, masks = self._postprocess_masks(masks, logits, scores) # noqa: F821 if masks.sum() == 0: return masks @@ -480,21 +480,22 @@ def _predict_masks( y, x = coords[:, 0], coords[:, 1] box_coords = ResizeLongestSide.apply_coords( torch.tensor([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device), - original_size, + original_size[0], self.config.model.image_size, ) point_coords = torch.cat((point_coords, box_coords), dim=1) point_labels = torch.cat((point_labels, self.point_labels_box.to(self.device)), dim=1) - scores, logits = self( + high_res_masks, scores, logits = 
self( image_embeddings=image_embeddings, point_coords=point_coords, point_labels=point_labels, mask_input=mask_input, has_mask_input=has_mask_input, + orig_size=original_size, ) - - _, masks = self._postprocess_masks(logits, scores, original_size) + masks = high_res_masks > self.config.model.mask_threshold + _, masks = self._postprocess_masks(masks, logits, scores) return masks def training_step(self, batch, batch_idx) -> None: @@ -619,15 +620,12 @@ def _preprocess_masks(self, x: torch.Tensor) -> torch.Tensor: def _postprocess_masks( self, + masks: torch.Tensor, logits: torch.Tensor, scores: torch.Tensor, - original_size: torch.Tensor, is_single: bool = False, ): """Post-process masks for cascaded post-refinements.""" - high_res_masks = self.mask_postprocessing(logits, self.config.model.image_size, original_size) - masks = high_res_masks > self.config.model.mask_threshold - if is_single: best_idx = 0 else: @@ -642,7 +640,7 @@ def _postprocess_masks( if len(scores[0]) == 0: # all predicted masks were zero masks, ignore them. - return None, torch.zeros((self.config.model.image_size, self.config.model.image_size), device="cpu") + return None, torch.zeros(masks.shape[-2:], device="cpu") best_idx = torch.argmax(scores[0]) return logits[:, best_idx], masks[0, best_idx] diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index 1123cd20c87..725f34aae25 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -305,8 +305,9 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), "has_mask_input": torch.tensor([[1]], dtype=torch.float32), + "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.float) } - output_names = ["iou_predictions", "low_res_masks"] + output_names = ["upscaled_masks", "iou_predictions", "low_res_masks"] model_to_export = self.model with warnings.catch_warnings(): @@ -666,8 +667,9 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), "has_mask_input": torch.tensor([[1]], dtype=torch.float32), + "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.float) } - output_names = ["iou_predictions", "low_res_masks"] + output_names = ["upscaled_masks", "iou_predictions", "low_res_masks"] model_to_export = self.model else: diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 60cc3b25331..02d0c9aa4ce 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -526,7 +526,6 @@ def __getitem__(self, index: int): image_embeddings = self.image_encoder(images["images"]) prompt = prompts[0] # only use the first prompt prompt.pop("label") - prompt.pop("orig_size") prompt.update({"image_embeddings": image_embeddings["image_embeddings"]}) return prompt # TODO (sungchul): change has_mask_input @@ -587,6 +586,7 @@ def __getitem__(self, index: int) -> Dict[str, Any]: inputs_decoder.update(image_embeddings) inputs_decoder.update( { + "orig_size": original_size[None], "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32), "has_mask_input": np.zeros((1, 1), 
dtype=np.float32), } diff --git a/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml index 9009e81d953..bbefedd68ef 100644 --- a/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml +++ b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml @@ -1,3 +1,3 @@ TestToolsZeroShotVisualPrompting: ptq: - number_of_fakequantizers: 69 + number_of_fakequantizers: 71 diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index b8fe9e626bc..f5dce1d8396 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -344,16 +344,6 @@ def test_select_masks(self) -> None: assert masks[:, -1, :, :] == selected_mask assert iou_preds[:, -1] == selected_iou_pred - @e2e_pytest_unit - def test_mask_postprocessing(self, mocker) -> None: - """Test mask_postprocessing.""" - masks = torch.empty(1, 1, 2, 2) - orig_size = Tensor((8, 8)) - - results = SegmentAnything.mask_postprocessing(masks, 6, orig_size) - - assert results[0, 0].shape == tuple(orig_size) - @e2e_pytest_unit def test_forward_train(self) -> None: """Test forward.""" @@ -494,22 +484,40 @@ def test_predict_step(self, mocker, return_logits: bool, expected: Tensor) -> No @e2e_pytest_unit @pytest.mark.parametrize( - "input_size,original_size,padding,expected", + "input_size,original_size,expected", [ - ((6, 6), (8, 8), (0, 0, 0, 0), (8, 8)), - ((6, 6), (8, 8), (0, 0, 2, 2), (8, 8)), + (6, torch.tensor((8, 8)), (1, 8, 8)), + (6, torch.tensor((8, 8)), (1, 8, 8)), ], ) def test_postprocess_masks( - self, input_size: Tuple[int], original_size: Tuple[int], padding: Tuple[int], expected: Tuple[int] + self, input_size: int, original_size: Tuple[int], expected: Tuple[int] ) -> None: """Test postprocess_masks.""" sam = SegmentAnything(config=self.base_config) masks = torch.zeros((1, 1, 4, 4)) - results = sam.postprocess_masks(masks, input_size, padding, original_size) + results = sam.postprocess_masks(masks, input_size, original_size) assert results.shape[1:] == expected + + @e2e_pytest_unit + @pytest.mark.parametrize( + "input_image_size,expected", + [ + (torch.tensor((2, 4)), torch.tensor((3, 6))), + (torch.tensor((4, 2)), torch.tensor((6, 3))), + ], + ) + def test_get_prepadded_size(self, input_image_size: Tensor, expected: Tensor) -> None: + """Test get_prepadded_size.""" + sam = SegmentAnything(config=self.base_config) + + longest_side = 6 + + results = sam.get_prepadded_size(input_image_size, longest_side) + + assert torch.all(results == expected) @e2e_pytest_unit @pytest.mark.parametrize( diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index a03fa4b838a..3bdb01d79b7 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ 
b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -201,17 +201,17 @@ def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: processed_prompts = {MockScoredLabel(label=1, name="label"): [{"box": torch.tensor([[0, 0, 1, 1]])}]} zero_shot_segment_anything.learn( - images=torch.ones((1, 3, 8, 8)), + images=torch.ones((1, 3, 4, 4)), processed_prompts=processed_prompts, padding=(0, 0, 0, 0), - original_size=(8, 8), + original_size=torch.tensor((4, 4)), ) - assert zero_shot_segment_anything.prompt_getter.reference_feats.shape == (2, 1, 2) - assert zero_shot_segment_anything.prompt_getter.reference_prompts.shape == (2, 8, 8) + assert zero_shot_segment_anything.prompt_getter.reference_feats.shape == (2, 1, 3) + assert zero_shot_segment_anything.prompt_getter.reference_prompts.shape == (2, 4, 4) @e2e_pytest_unit - @pytest.mark.parametrize("expected", [[torch.ones((8, 8)) / 2, torch.tensor([0.0, 0.0, 0.5])]]) + @pytest.mark.parametrize("expected", [[torch.ones((4, 4)) / 2, torch.tensor([0.0, 0.0, 0.5])]]) def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expected: torch.Tensor) -> None: """Test infer.""" monkeypatch.setattr( @@ -221,13 +221,13 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect zero_shot_segment_anything = set_zero_shot_segment_anything() zero_shot_segment_anything.prompt_getter.reference_feats = torch.rand(1, 1, 4) - zero_shot_segment_anything.prompt_getter.reference_prompts = torch.zeros((8, 8)) + zero_shot_segment_anything.prompt_getter.reference_prompts = torch.zeros((4, 4)) mocker.patch.object( - SegmentAnything, "forward", return_value=(torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) + SegmentAnything, "forward", return_value=(torch.ones(1, 4, 4, 4), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) ) total_results = zero_shot_segment_anything.infer( - images=torch.ones((1, 3, 8, 8)), original_size=torch.tensor([[8, 8]], dtype=torch.int64) + images=torch.ones((1, 3, 4, 4)), original_size=torch.tensor([[4, 4]], dtype=torch.int64) ) for i, results in enumerate(total_results[0]): @@ -235,11 +235,10 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect assert torch.equal(result[0], expected[i]) @e2e_pytest_unit - @pytest.mark.parametrize("is_postprocess", [True, False]) - def test_predict_masks(self, mocker, set_zero_shot_segment_anything, is_postprocess: bool) -> None: + def test_predict_masks(self, mocker, set_zero_shot_segment_anything) -> None: """Test _predict_masks.""" mocker.patch.object( - SegmentAnything, "forward", return_value=(torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) + SegmentAnything, "forward", return_value=(torch.ones(1, 4, 8, 8), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) ) zero_shot_segment_anything = set_zero_shot_segment_anything() @@ -249,7 +248,7 @@ def test_predict_masks(self, mocker, set_zero_shot_segment_anything, is_postproc image_embeddings=torch.rand(1), point_coords=torch.rand(1, 2, 2), point_labels=torch.randint(low=0, high=2, size=(1, 2)), - original_size=torch.tensor((8, 8), dtype=torch.int64), + original_size=torch.tensor([[8, 8]], dtype=torch.int64), ) assert mask.shape == (8, 8) @@ -300,22 +299,21 @@ def test_preprocess_masks(self, set_zero_shot_segment_anything) -> None: @e2e_pytest_unit @pytest.mark.parametrize( - "logits,expected", + "masks,logits,expected", [ - (torch.ones(1, 4, 4, 4), torch.ones(4, 4, 
dtype=torch.bool)), - (torch.zeros(1, 4, 4, 4), torch.zeros(4, 4, dtype=torch.bool)), + (torch.ones(1, 4, 8, 8), torch.ones(1, 4, 4, 4), torch.ones(8, 8)), + (torch.zeros(1, 4, 8, 8), torch.zeros(1, 4, 4, 4), torch.zeros(8, 8)), ], ) def test_postprocess_masks( - self, set_zero_shot_segment_anything, logits: torch.Tensor, expected: torch.Tensor + self, set_zero_shot_segment_anything, masks: torch.Tensor, logits: torch.Tensor, expected: torch.Tensor ) -> None: """Test _postprocess_masks.""" zero_shot_segment_anything = set_zero_shot_segment_anything() zero_shot_segment_anything.config.model.image_size = 4 scores = torch.tensor([[0.0, 0.1, 0.2, 0.3]]) - original_size = torch.tensor([4, 4], dtype=torch.int64) - _, result = zero_shot_segment_anything._postprocess_masks(logits, scores, original_size) + _, result = zero_shot_segment_anything._postprocess_masks(masks, logits, scores) assert torch.equal(result, expected) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index acd9d0c48ca..8c717a45421 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -353,12 +353,13 @@ def test_export_to_onnx(self): "point_labels": np.random.randint(low=0, high=4, size=(1, 2)).astype(np.float32), "mask_input": np.random.randn(1, 1, *mask_input_size).astype(np.float32), "has_mask_input": np.array([[1]], dtype=np.float32), + "orig_size": np.random.randint(low=256, high=2048, size=(1, 2)).astype(np.float32), }, } onnx_outputs = { "visual_prompting_image_encoder": ["image_embeddings"], "visual_prompting_prompt_getter": ["total_points_scores", "total_bg_coords"], - "visual_prompting_decoder": ["iou_predictions", "low_res_masks"], + "visual_prompting_decoder": ["upscaled_masks", "iou_predictions", "low_res_masks"], } onnx_rt_models = { diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 4181a8b2bab..a7af582cd63 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -365,7 +365,7 @@ def test_getitem(self, mocker, load_dataloader, module_name: str): self.mocker_read_model.assert_called_once() self.mocker_compile_model.assert_called_once() assert "label" not in results - assert "orig_size" not in results + assert "orig_size" in results assert "image_embeddings" in results From b4d66b2ddd16ee5c2ca5eddcba4049e6ca3eea3f Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 8 Feb 2024 15:10:09 +0900 Subject: [PATCH 04/28] Update OV inferencer (TODO: zsl) --- .../model_wrappers/openvino_models.py | 45 +------------------ .../visual_prompting/tasks/inference.py | 4 +- .../visual_prompting/tasks/openvino.py | 35 +++++++-------- .../model_wrappers/test_openvino_models.py | 26 +---------- .../test_zero_shot_segment_anything.py | 17 +++---- .../visual_prompting/tasks/test_inference.py | 2 +- .../visual_prompting/tasks/test_openvino.py | 35 +++++++-------- 7 files changed, 46 insertions(+), 118 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 1bdc1a473a3..432d0010e09 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ 
b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -94,7 +94,7 @@ def parameters(cls): # noqa: D102 return parameters def _get_outputs(self): - return "low_res_masks" + return "upscaled_masks" def preprocess(self, inputs: Dict[str, Any], meta: Dict[str, Any]) -> List[Dict[str, Any]]: """Preprocess prompts.""" @@ -111,7 +111,7 @@ def preprocess(self, inputs: Dict[str, Any], meta: Dict[str, Any]) -> List[Dict[ # TODO (sungchul): how to generate mask_input and has_mask_input "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32), "has_mask_input": np.zeros((1, 1), dtype=np.float32), - "orig_size": np.array(inputs["original_size"], dtype=np.float32).reshape((-1, 2)), + "orig_size": np.array(inputs["original_size"], dtype=np.int64).reshape((-1, 2)), "label": label, } ) @@ -159,7 +159,6 @@ def sigmoid(x): return np.tanh(x * 0.5) * 0.5 + 0.5 # to avoid overflow soft_prediction = outputs[self.output_blob_name].squeeze() - soft_prediction = self.resize_and_crop(soft_prediction, meta["original_size"][0]) soft_prediction = sigmoid(soft_prediction) meta["soft_prediction"] = soft_prediction @@ -173,43 +172,3 @@ def sigmoid(x): meta["label"].probability = probability return hard_prediction, soft_prediction - - def resize_and_crop(self, soft_prediction: np.ndarray, original_size: np.ndarray) -> np.ndarray: - """Resize and crop soft prediction. - - Args: - soft_prediction (np.ndarray): Predicted soft prediction with HxW shape. - original_size (np.ndarray): The original image size. - - Returns: - final_soft_prediction (np.ndarray): Resized and cropped soft prediction for the original image. - """ - resized_soft_prediction = cv2.resize( - soft_prediction, (self.image_size, self.image_size), 0, 0, interpolation=cv2.INTER_LINEAR - ) - - prepadded_size = self.get_padded_size(original_size, self.image_size).astype(np.int64) - resized_cropped_soft_prediction = resized_soft_prediction[: prepadded_size[0], : prepadded_size[1], ...] - - original_size = original_size.astype(np.int64) - h, w = original_size - final_soft_prediction = cv2.resize( - resized_cropped_soft_prediction, (w, h), 0, 0, interpolation=cv2.INTER_LINEAR - ) - return final_soft_prediction - - def get_padded_size(self, original_size: np.ndarray, longest_side: int) -> np.ndarray: - """Get padded size from original size and longest side of the image. - - Args: - original_size (np.ndarray): The original image size with shape Bx2. - longest_side (int): The size of the longest side. - - Returns: - transformed_size (np.ndarray): The transformed image size with shape Bx2. 
- """ - original_size = original_size.astype(np.float32) - scale = longest_side / np.max(original_size) - transformed_size = scale * original_size - transformed_size = np.floor(transformed_size + 0.5).astype(np.int64) - return transformed_size diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index 725f34aae25..8bff9bd55f7 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -305,7 +305,7 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), "has_mask_input": torch.tensor([[1]], dtype=torch.float32), - "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.float) + "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.int64) } output_names = ["upscaled_masks", "iou_predictions", "low_res_masks"] model_to_export = self.model @@ -667,7 +667,7 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), "has_mask_input": torch.tensor([[1]], dtype=torch.float32), - "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.float) + "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.int64) } output_names = ["upscaled_masks", "iou_predictions", "low_res_masks"] model_to_export = self.model diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 02d0c9aa4ce..7751713aa47 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -176,12 +176,11 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: soft_predictions: List[np.ndarray] = [] for prompt in prompts: label = prompt.pop("label") - orig_size = prompt.pop("orig_size") prompt.update(image_embeddings) # forward decoder to get predicted mask prediction = self.forward_decoder(prompt) - metadata = {"label": label, "original_size": orig_size} + metadata = {"label": label} # set annotation for eval annotation, hard_prediction, soft_prediction = self.post_process(prediction, metadata) @@ -378,42 +377,40 @@ def forward_decoder( # type: ignore elif i == 1: # Cascaded Post-refinement-1 - mask_input, masks, iou_predictions = self._postprocess_masks( - logits, scores, original_size, is_single=True # noqa: F821 - ) + mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) if masks.sum() == 0: - return {"iou_predictions": iou_predictions, "low_res_masks": mask_input} + return {"masks": masks} has_mask_input = self.has_mask_inputs[1] elif i == 2: # Cascaded Post-refinement-2 - mask_input, masks, iou_predictions = self._postprocess_masks( - logits, scores, original_size # noqa: F821 - ) + mask_input, masks = self._postprocess_masks(masks, logits, scores) if masks.sum() == 0: - return {"iou_predictions": iou_predictions, "low_res_masks": mask_input} + return {"masks": masks} has_mask_input = self.has_mask_inputs[1] y, x = np.nonzero(masks) box_coords = self.model["decoder"]._apply_coords( np.array([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=np.float32), original_size ) - inputs["point_coords"] = np.concatenate((inputs["point_coords"], 
box_coords), axis=1) - inputs["point_labels"] = np.concatenate((inputs["point_labels"], self.point_labels_box), axis=1) + inputs.update({ + "point_coords": np.concatenate((inputs["point_coords"], box_coords), axis=1), + "point_labels": np.concatenate((inputs["point_labels"], self.point_labels_box), axis=1), + }) inputs.update({"mask_input": mask_input, "has_mask_input": has_mask_input}) prediction = self.model["decoder"].infer_sync(inputs) - scores, logits = prediction["iou_predictions"], prediction["low_res_masks"] + upscaled_masks, scores, logits = prediction["upscaled_masks"], prediction["iou_predictions"], prediction["low_res_masks"] + masks = upscaled_masks > self.model["decoder"].mask_threshold - return {"iou_predictions": scores[:, mask_slice], "low_res_masks": logits[:, mask_slice, :, :]} + _, masks = self._postprocess_masks(masks, logits, scores) + return {"masks": masks} def _postprocess_masks( - self, logits: np.ndarray, scores: np.ndarray, original_size: np.ndarray, is_single: bool = False + self, masks: np.ndarray, logits: np.ndarray, scores: np.ndarray, is_single: bool = False ) -> Tuple[np.ndarray, ...]: """Post-process logits for resized masks according to best index based on scores.""" - high_res_masks = self.model["decoder"].resize_and_crop(logits[0].transpose(1, 2, 0), original_size) - masks = high_res_masks > self.model["decoder"].mask_threshold masks = masks.transpose(2, 0, 1)[None] if is_single: @@ -430,10 +427,10 @@ def _postprocess_masks( if len(scores[0]) == 0: # all predicted masks were zero masks, ignore them. - return None, np.zeros((self.model["decoder"].image_size, self.model["decoder"].image_size)), 0.0 + return None, np.zeros(masks.shape[-2:]) best_idx = np.argmax(scores[0]) - return logits[:, [best_idx]], masks[0, best_idx], scores[0, best_idx] + return logits[:, [best_idx]], masks[0, best_idx] def __inspect_overlapping_areas( self, diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index 7740de14ab9..9b5093b004a 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -79,7 +79,7 @@ def test_get_outputs(self): """Test _get_outputs.""" results = self.decoder._get_outputs() - assert "low_res_masks" == results + assert "upscaled_masks" == results @e2e_pytest_unit def test_preprocess(self): @@ -142,27 +142,3 @@ def test_postprocess(self, mocker): returned_value = self.decoder.postprocess(outputs=fake_output, meta=fake_metadata) assert isinstance(returned_value, tuple) - assert np.all(returned_value[0].shape == fake_metadata["original_size"]) - assert np.all(returned_value[1].shape == fake_metadata["original_size"]) - - @e2e_pytest_unit - def test_resize_and_crop(self, mocker): - """Test resize_and_crop.""" - mocker.patch.object(self.decoder, "get_padded_size", return_value=np.array((6, 6))) - - masks = np.zeros((2, 2)) - orig_size = np.array((8, 8)) - - results = self.decoder.resize_and_crop(masks, orig_size) - - assert results.shape == tuple(orig_size) - - @e2e_pytest_unit - def test_get_padded_size(self): - """Test get_padded_size.""" - original_size = np.array((2, 4)) - longest_side = 6 - - results = self.decoder.get_padded_size(original_size, longest_side) - - assert np.all(results == np.array((3, 6))) diff --git 
a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 3bdb01d79b7..8d145bfcaaa 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -74,13 +74,13 @@ def test_set_reference(self) -> None: assert self.prompt_getter.reference_prompts[3].sum() == 9 @e2e_pytest_unit - def test_forward(self, mocker) -> None: + @pytest.mark.parametrize("result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])]) + def test_forward(self, mocker, result_point_selection: torch.Tensor) -> None: """Test forward.""" mocker.patch.object( self.prompt_getter, "get_prompt_candidates", - return_value=(torch.tensor([[[0, 0, 0.5], [1, 1, 0.7]]]), torch.tensor([[[2, 2]]])), - ) + return_value=(result_point_selection, torch.zeros(1, 2))) image_embeddings = torch.ones(1, 4, 4, 4) self.prompt_getter.reference_feats = torch.rand(1, 1, 4) original_size = torch.tensor((self.prompt_getter.image_size, self.prompt_getter.image_size), dtype=torch.int64) @@ -88,17 +88,18 @@ def test_forward(self, mocker) -> None: total_points_scores, total_bg_coords = self.prompt_getter( image_embeddings=image_embeddings, original_size=original_size ) - + assert total_points_scores.shape[0] == 1 assert total_bg_coords.shape[0] == 1 @e2e_pytest_unit - def test_get_prompt_candidates(self, mocker) -> None: + @pytest.mark.parametrize("result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])]) + def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tensor) -> None: """Test get_prompt_candidates.""" mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" ) - mocker.patch.object(self.prompt_getter, "_point_selection", return_value=("points_scores", "bg_coords")) + mocker.patch.object(self.prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2))) image_embeddings = torch.ones(1, 4, 4, 4) self.prompt_getter.reference_feats = torch.rand(1, 1, 4) label = torch.tensor([[0]], dtype=torch.int64) @@ -110,8 +111,8 @@ def test_get_prompt_candidates(self, mocker) -> None: image_embeddings=image_embeddings, label=label, original_size=original_size ) - assert points_scores == "points_scores" - assert bg_coords == "bg_coords" + assert torch.all(points_scores == result_point_selection) + assert torch.all(bg_coords == torch.zeros(1, 2)) @e2e_pytest_unit @pytest.mark.parametrize( diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index 8c717a45421..051ddcdfe8f 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -353,7 +353,7 @@ def test_export_to_onnx(self): "point_labels": np.random.randint(low=0, high=4, size=(1, 2)).astype(np.float32), "mask_input": np.random.randn(1, 1, *mask_input_size).astype(np.float32), "has_mask_input": np.array([[1]], dtype=np.float32), - "orig_size": 
np.random.randint(low=256, high=2048, size=(1, 2)).astype(np.float32), + "orig_size": np.random.randint(low=256, high=2048, size=(1, 2)).astype(np.int64), }, } onnx_outputs = { diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index a7af582cd63..459aba889f5 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -202,6 +202,7 @@ def setup(self, mocker): self.visual_prompting_ov_inferencer.model["decoder"] = mocker.patch( "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", autospec=True ) + self.visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.3 self.visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) @e2e_pytest_unit @@ -243,14 +244,14 @@ def test_predict(self, mocker): "postprocess_output,infer_sync_output,expected", [ ( - (np.ones((1, 1)), np.ones((3, 3)), 0.9), - {"iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, - {"iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + (np.ones((1, 1)), np.ones((3, 3))), + {"upscaled_masks": np.ones((3, 3)), "iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + {"masks": np.ones((3, 3))}, ), ( - (np.zeros((2, 2)), np.zeros((3, 3)), 0.0), - {"iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, - {"iou_predictions": 0.0, "low_res_masks": np.zeros((2, 2))}, + (np.zeros((2, 2)), np.zeros((3, 3))), + {"upscaled_masks": np.zeros((3, 3)), "iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + {"masks": np.zeros((3, 3))}, ), ], ) @@ -281,17 +282,15 @@ def test_forward_decoder( original_size=np.array([3, 3]), ) - assert np.all(result["iou_predictions"] == expected["iou_predictions"]) - assert np.all(result["low_res_masks"] == expected["low_res_masks"]) + assert np.all(result["masks"] == expected["masks"]) @e2e_pytest_unit @pytest.mark.parametrize( - "high_res_masks,expected_masks,expected_scores", + "masks,expected_masks", [ ( np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[..., None], 4, axis=-1), - np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_), - 0.9, + np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_) ), ( np.concatenate( @@ -301,25 +300,21 @@ def test_forward_decoder( ), axis=-1, ), - np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_), - 0.8, + np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_) ), - (np.zeros((3, 3, 4)), np.zeros((3, 3)), 0.0), + (np.zeros((3, 3, 4)), np.zeros((3, 3))), ], ) - def test_postprocess_masks(self, high_res_masks: np.ndarray, expected_masks: np.ndarray, expected_scores: float): + def test_postprocess_masks(self, masks: np.ndarray, expected_masks: np.ndarray): """Test _postprocess_masks.""" - self.visual_prompting_ov_inferencer.model["decoder"].resize_and_crop.return_value = high_res_masks self.visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.0 self.visual_prompting_ov_inferencer.model["decoder"].image_size = 3 - _, result_masks, result_scores = self.visual_prompting_ov_inferencer._postprocess_masks( - logits=np.empty((1, 4, 2, 2)), scores=np.array([[0.5, 0.7, 0.8, 0.9]]), original_size=np.array([3, 3]) - ) + _, result_masks = self.visual_prompting_ov_inferencer._postprocess_masks( + masks=masks, logits=np.empty((1, 4, 2, 2)), scores=np.array([[0.5, 0.7, 0.8, 0.9]])) assert 
result_masks.shape == (3, 3) assert np.all(result_masks == expected_masks) - assert result_scores == expected_scores class TestOTXOpenVinoDataLoader: From 205c7635912072460dca6d411e33f7edb955a301 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 13 Feb 2024 11:44:42 +0900 Subject: [PATCH 05/28] Update `learn` pipeline --- .../datasets/pipelines/transforms.py | 4 - .../zero_shot_segment_anything.py | 390 +++++++++--------- .../visual_prompting/tasks/inference.py | 11 +- .../datasets/pipelines/test_transforms.py | 11 +- .../test_zero_shot_segment_anything.py | 81 ++-- .../visual_prompting/tasks/test_inference.py | 5 +- .../visual_prompting/test_helpers.py | 1 + 7 files changed, 226 insertions(+), 277 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py index f53fb4b3457..05b0693a39a 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py @@ -43,7 +43,6 @@ def _convert_empty_to_none(x: str, dtype: torch.dtype = torch.float32) -> List: points = None # TBD gt_masks = _convert_empty_to_none("gt_masks", torch.int32) original_size = _convert_empty_to_none("original_size") - padding = [item["padding"] for item in batch] path = [item["path"] for item in batch] labels = [item["labels"] for item in batch] if gt_masks: @@ -56,7 +55,6 @@ def _convert_empty_to_none(x: str, dtype: torch.dtype = torch.float32) -> List: "original_size": original_size, "path": path, "labels": labels, - "padding": padding, } return { "index": -1, @@ -67,7 +65,6 @@ def _convert_empty_to_none(x: str, dtype: torch.dtype = torch.float32) -> List: "original_size": [], "path": [], "labels": [], - "padding": [], } @@ -89,7 +86,6 @@ def __call__(self, item: Dict[str, Union[List[Any], Tensor]]) -> Dict[str, Union pad_h = max_dim - h padding = (0, 0, pad_w, pad_h) - item["padding"] = padding item["images"] = pad(item["images"], padding, fill=0, padding_mode="constant") return item diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 4fceb645b39..1f26ae73b28 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -10,7 +10,8 @@ import torch from omegaconf import DictConfig -from torch import nn +from torch import nn, Tensor +from torch.nn import Parameter, ParameterDict from torch.nn import functional as F from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( @@ -30,107 +31,73 @@ class PromptGetter(nn.Module): default_threshold_reference = 0.3 default_threshold_target = 0.65 - def __init__( - self, - image_size: int, - reference_feats: Optional[torch.Tensor] = None, - reference_prompts: Optional[torch.Tensor] = None, - downsizing: int = 64, - ) -> None: + def __init__(self, image_size: int, downsizing: int = 64) -> None: super().__init__() self.image_size = image_size self.downsizing = downsizing - self.initialize(reference_feats, reference_prompts) - self.zero_tensor = 
torch.tensor(0) - - def initialize( - self, reference_feats: Optional[torch.Tensor] = None, reference_prompts: Optional[torch.Tensor] = None - ) -> None: - """Initialize reference features and prompts.""" - self.reference_feats = reference_feats - self.reference_prompts = reference_prompts + self.zero_tensor = torch.as_tensor(0) def set_default_thresholds(self, default_threshold_reference: float, default_threshold_target: float) -> None: """Set default thresholds.""" self.default_threshold_reference = default_threshold_reference self.default_threshold_target = default_threshold_target - def set_reference(self, label: ScoredLabel, reference_feats: torch.Tensor, reference_prompts: torch.Tensor) -> None: - """Set reference features and prompts.""" - if self.reference_feats is None: - self.reference_feats = torch.zeros_like(reference_feats).unsqueeze(0) - if self.reference_prompts is None: - self.reference_prompts = torch.zeros_like(reference_prompts).unsqueeze(0) - - for idx in range(int(label.id_) + 1): - if idx == int(label.id_): - while self.reference_feats.shape[0] - 1 < idx: - self.reference_feats = torch.cat( - (self.reference_feats, torch.zeros_like(reference_feats).unsqueeze(0)), dim=0 - ) - self.reference_prompts = torch.cat( - (self.reference_prompts, torch.zeros_like(reference_prompts).unsqueeze(0)), dim=0 - ) - self.reference_feats[idx] = reference_feats - self.reference_prompts[idx] = reference_prompts - def forward( self, - image_embeddings: torch.Tensor, - original_size: torch.Tensor, - threshold: torch.Tensor = torch.tensor([[0.0]], dtype=torch.float32), - num_bg_points: torch.Tensor = torch.tensor([[1]], dtype=torch.int64), - ) -> Tuple[torch.Tensor, torch.Tensor]: + image_embedding: Tensor, + reference_feats: Tensor, + used_indices: Tensor, + original_size: Tensor, + threshold: Tensor = torch.as_tensor([[0.0]], dtype=torch.float32), + num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), + ) -> Tuple[Tensor, Tensor]: """Get prompt candidates.""" - total_points_scores: torch.Tensor - total_bg_coords: torch.Tensor - - device = image_embeddings.device + device = image_embedding.device threshold = threshold.to(device) - for label in torch.arange(self.reference_feats.shape[0]): + + total_points_scores: Tensor = torch.zeros(used_indices.max() + 1, 0, 3, device=device) + total_bg_coords: Tensor = torch.zeros(used_indices.max() + 1, num_bg_points, 2, device=device) + for label in used_indices[0]: points_scores, bg_coords = self.get_prompt_candidates( - image_embeddings=image_embeddings, - label=label, + image_embedding=image_embedding, + reference_feat=reference_feats[label], original_size=original_size, threshold=threshold, num_bg_points=num_bg_points, device=device, ) - if label == 0: - total_points_scores = points_scores.unsqueeze(0) - total_bg_coords = bg_coords.unsqueeze(0) - else: - pad_size = torch.tensor(points_scores.shape[0] - total_points_scores.shape[1]) - pad_tot = torch.max(self.zero_tensor, pad_size) - pad_cur = torch.max(self.zero_tensor, -pad_size) + + pad_size = torch.as_tensor(points_scores.shape[0] - total_points_scores.shape[1]) + pad_tot = torch.max(self.zero_tensor, pad_size) + pad_cur = torch.max(self.zero_tensor, -pad_size) - total_points_scores = F.pad(total_points_scores, (0, 0, 0, pad_tot, 0, 0), value=-1) - points_scores = F.pad(points_scores, (0, 0, 0, pad_cur), value=-1) + total_points_scores = F.pad(total_points_scores, (0, 0, 0, pad_tot, 0, 0), value=-1) + points_scores = F.pad(points_scores, (0, 0, 0, pad_cur), value=-1) - 
total_points_scores = torch.cat((total_points_scores, points_scores.unsqueeze(0)), dim=0) - total_bg_coords = torch.cat((total_bg_coords, bg_coords.unsqueeze(0)), dim=0) + total_points_scores[label] = points_scores + total_bg_coords[label] = bg_coords return total_points_scores, total_bg_coords def get_prompt_candidates( self, - image_embeddings: torch.Tensor, - label: torch.Tensor, - original_size: torch.Tensor, - threshold: torch.Tensor = torch.tensor([[0.0]], dtype=torch.float32), - num_bg_points: torch.Tensor = torch.tensor([[1]], dtype=torch.int64), - device: torch.device = torch.device("cpu"), - ) -> Tuple[torch.Tensor, torch.Tensor]: + image_embedding: Tensor, + reference_feat: Tensor, + original_size: Tensor, + threshold: Tensor = torch.as_tensor([[0.0]], dtype=torch.float32), + num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), + device: Union[torch.device, str] = torch.device("cpu"), + ) -> Tuple[Tensor, Tensor]: """Get prompt candidates from given reference and target features.""" assert original_size.dim() == 2 and threshold.dim() == 2 and num_bg_points.dim() == 2 - target_feat = image_embeddings.squeeze() + target_feat = image_embedding.squeeze() c_feat, h_feat, w_feat = target_feat.shape target_feat = target_feat / target_feat.norm(dim=0, keepdim=True) target_feat = target_feat.reshape(c_feat, h_feat * w_feat) - sim = self.reference_feats[label].to(device) @ target_feat + sim = reference_feat.to(device) @ target_feat sim = sim.reshape(1, 1, h_feat, w_feat) sim = ZeroShotSegmentAnything.postprocess_masks(sim, self.image_size, original_size[0]) @@ -146,11 +113,11 @@ def get_prompt_candidates( def _point_selection( self, - mask_sim: torch.Tensor, - original_size: torch.Tensor, - threshold: torch.Tensor, - num_bg_points: torch.Tensor = torch.tensor([[1]], dtype=torch.int64), - ) -> Tuple[torch.Tensor, torch.Tensor]: + mask_sim: Tensor, + original_size: Tensor, + threshold: Tensor, + num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), + ) -> Tuple[Tensor, Tensor]: """Select point used as point prompts.""" _, w_sim = mask_sim.shape @@ -202,40 +169,36 @@ def __init__(self, config: Optional[DictConfig] = None, state_dict: Optional[Ord if config is None: config = self.set_default_config() - if not config.model.freeze_image_encoder: - logger.warning("config.model.freeze_image_encoder(=False) must be set to True, changed.") - config.model.freeze_image_encoder = True - - if not config.model.freeze_prompt_encoder: - logger.warning("config.model.freeze_prompt_encoder(=False) must be set to True, changed.") - config.model.freeze_prompt_encoder = True - - if not config.model.freeze_mask_decoder: - logger.warning("config.model.freeze_mask_decoder(=False) must be set to True, changed.") - config.model.freeze_mask_decoder = True - - prompt_getter_reference_feats = None - prompt_getter_reference_prompts = None - if state_dict: - if "prompt_getter.reference_feats" in state_dict: - prompt_getter_reference_feats = state_dict.pop("prompt_getter.reference_feats") - if "prompt_getter.reference_prompts" in state_dict: - prompt_getter_reference_prompts = state_dict.pop("prompt_getter.reference_prompts") + # check freeze conditions + for condition in ["freeze_image_encoder", "freeze_prompt_encoder", "freeze_mask_decoder"]: + if not getattr(config.model, condition, False): + logger.warning(f"config.model.{condition}(=False) must be set to True, changed.") + setattr(config.model, condition, True) - super().__init__(config, state_dict) + super().__init__(config, None) + 
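# reference_info buffers start empty; `learn` fills them and the state-dict pre-hook restores them
+        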
self.set_empty_reference_info() + self._register_load_state_dict_pre_hook(self.load_state_dict_pre_hook) + self.load_checkpoint(state_dict) - self.prompt_getter = PromptGetter( - image_size=config.model.image_size, - reference_feats=prompt_getter_reference_feats, - reference_prompts=prompt_getter_reference_prompts, - ) + self.prompt_getter = PromptGetter(image_size=config.model.image_size) self.prompt_getter.set_default_thresholds( default_threshold_reference=config.model.default_threshold_reference, default_threshold_target=config.model.default_threshold_target, ) - self.point_labels_box = torch.tensor([[2, 3]], dtype=torch.float32) - self.has_mask_inputs = [torch.tensor([[0.0]]), torch.tensor([[1.0]])] + self.point_labels_box = torch.as_tensor([[2, 3]], dtype=torch.float32) + self.has_mask_inputs = [torch.as_tensor([[0.0]]), torch.as_tensor([[1.0]])] + + def load_state_dict_pre_hook(self, state_dict: dict[str, Any], prefix: str = "", *args, **kwargs) -> None: + """Load reference info manually.""" + _reference_feats: Tensor = state_dict.get("reference_info.reference_feats", torch.as_tensor([], dtype=torch.float32)) + _used_indices: Tensor = state_dict.get("reference_info.used_indices", torch.as_tensor([], dtype=torch.int64)) + self.reference_info = ParameterDict( + { + "reference_feats": Parameter(_reference_feats, requires_grad=False), + "used_indices": Parameter(_used_indices, requires_grad=False), + }, + ) def set_default_config(self) -> DictConfig: """Set default config when using independently.""" @@ -254,14 +217,42 @@ def set_default_config(self) -> DictConfig: } } ) + + def set_empty_reference_info(self) -> None: + """Set empty reference information.""" + reference_feats: Parameter = Parameter(torch.as_tensor([], dtype=torch.float32), requires_grad=False) + used_indices: Parameter = Parameter(torch.as_tensor([], dtype=torch.int64), requires_grad=False) + self.reference_info = ParameterDict( + { + "reference_feats": reference_feats, + "used_indices": used_indices, + }, + ) + self.is_reference_info_empty = True + + def initialize_reference_info(self, largest_label: int) -> None: + """Initialize reference information.""" + self.reference_info["reference_feats"] = Parameter(torch.zeros(largest_label + 1, 1, 256), requires_grad=False) + self.reference_info["used_indices"] = Parameter(torch.as_tensor([], dtype=torch.int64), requires_grad=False) + self.is_reference_info_empty = False + + def expand_reference_info(self, new_largest_label: int) -> None: + """Expand reference info dimensions if newly given processed prompts have more lables.""" + if new_largest_label > (cur_largest_label := len(self.reference_info["reference_feats"]) - 1): + diff = new_largest_label - cur_largest_label + self.reference_info["reference_feats"] = F.pad( + self.reference_info["reference_feats"], + (0, 0, 0, 0, 0, diff), + value=0.0, + ) @torch.no_grad() def learn( self, - images: torch.Tensor, - processed_prompts: Dict[ScoredLabel, List[Dict[str, torch.Tensor]]], - padding: Union[Tuple[int, ...], torch.Tensor], - original_size: torch.Tensor, + images: Tensor, + processed_prompts: Dict[ScoredLabel, List[Dict[str, Tensor]]], + original_size: Tensor, + return_outputs: bool = False, ) -> None: """Get reference features. @@ -270,19 +261,25 @@ def learn( Currently, single batch is only supported. Args: - images (torch.Tensor): Given images for reference features. - processed_prompts (Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]): The whole class-wise prompts + images (Tensor): Given images for reference features. 
+ processed_prompts (Dict[ScoredLabel, List[Dict[str, Tensor]]]): The whole class-wise prompts processed at _preprocess_prompts. - padding (Union[Tuple[int, ...], torch.Tensor]): Padding size. - original_size (torch.Tensor): Original image size. + original_size (Tensor): Original image size. """ assert images.shape[0] == 1, "Only single batch is supported." - - self.prompt_getter.initialize() + + # initialize tensors to contain reference features and prompts + largest_label = max([int(label.id) for label in processed_prompts.keys()]) + if self.is_reference_info_empty: + self.initialize_reference_info(largest_label) + else: + self.expand_reference_info(largest_label) + # TODO(sungchul): consider who to handle multiple reference features, currently replace it # noqa: TD003 image_embeddings = self.image_encoder(images) - ref_feat = image_embeddings.squeeze().permute(1, 2, 0) - + processed_embedding = image_embeddings.squeeze().permute(1, 2, 0) + + ref_masks = torch.zeros(largest_label + 1, *map(int, original_size)) for label, input_prompts in processed_prompts.items(): if label.name.lower() == "background": # skip background @@ -291,32 +288,28 @@ def learn( # generate reference mask # TODO (sungchul): ensemble multi reference features (current : use merged masks) - reference_prompt = torch.zeros(*map(int, original_size), dtype=torch.uint8, device=self.device) + ref_mask = torch.zeros(*map(int, original_size), dtype=torch.uint8, device=self.device) for input_prompt in input_prompts: - if "annotation" in input_prompt: + if (anno_prompt := input_prompt.get("annotation", None)) is not None: # directly use annotation information as a mask - reference_prompt[input_prompt.get("annotation") == 1] += 1 + ref_mask[anno_prompt == 1] += 1 else: - merged_input_prompts = self._merge_prompts(label, input_prompt, processed_prompts) + merged_input_prompts: Dict[str, Tensor] = self._merge_prompts(label, input_prompt, processed_prompts) # TODO (sungchul): they must be processed in `_merge_prompts` # and it is required to be expanded to other prompts. 
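# a box prompt is encoded below as its two corner points with labels (2, 3); a single point prompt uses label 1
                     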
point_coords = [] point_labels = [] - if "box" in merged_input_prompts: - for box in merged_input_prompts["box"]: - point_coords.append(box[:2]) - point_labels.append(2) - point_coords.append(box[2:]) - point_labels.append(3) - - if "points" in merged_input_prompts: - raise NotImplementedError() + if (input_prompt := merged_input_prompts.get("box", None)) is not None: + point_coords = input_prompt.reshape(1, 2, 2) + point_labels = torch.as_tensor([[2, 3]], device=self.device) + + if (input_prompt := merged_input_prompts.get("points", None)) is not None: + point_coords = input_prompt.reshape(1, 1, 2) + point_labels = torch.as_tensor([[1]], device=self.device) - if "annotations" in merged_input_prompts: + if (input_prompt := merged_input_prompts.get("annotations", None)) is not None: raise NotImplementedError() - point_coords = torch.stack(point_coords, dim=0).unsqueeze(0) - point_labels = torch.tensor([point_labels], device=self.device) masks = self._predict_masks( image_embeddings=image_embeddings, point_coords=point_coords, @@ -324,35 +317,38 @@ def learn( original_size=original_size.unsqueeze(0), is_cascade=False, ) - reference_prompt[masks] += 1 - reference_prompt = torch.clip(reference_prompt, 0, 1) + ref_mask[masks] += 1 + ref_mask = torch.clip(ref_mask, 0, 1).to(torch.float32) - ref_mask = reference_prompt.to(torch.float32) - reference_feat = None + ref_feat = None default_threshold_reference = deepcopy(self.prompt_getter.default_threshold_reference) - while reference_feat is None: + while ref_feat is None: logger.info(f"[*] default_threshold_reference : {default_threshold_reference:.4f}") - reference_feat = self._generate_masked_features( - ref_feat, ref_mask, default_threshold_reference, padding=padding - ) + ref_feat = self._generate_masked_features(processed_embedding, ref_mask, default_threshold_reference) default_threshold_reference -= 0.05 - self.prompt_getter.set_reference(label, reference_feat, reference_prompt) + self.reference_info["reference_feats"][int(label.id)] = ref_feat.detach().cpu() + self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([int(label.id)]))), requires_grad=False) + ref_masks[int(label.id)] = ref_mask.detach().cpu() + + self.reference_info["used_indices"] = Parameter(self.reference_info["used_indices"].unsqueeze(0), requires_grad=False) + if return_outputs: + return self.reference_info, ref_masks @torch.no_grad() def infer( - self, images: torch.Tensor, original_size: torch.Tensor - ) -> List[List[DefaultDict[int, List[torch.Tensor]]]]: + self, images: Tensor, original_size: Tensor + ) -> List[List[DefaultDict[int, List[Tensor]]]]: """Zero-shot inference with reference features. Get target results by using reference features and target images' features. Args: - images (torch.Tensor): Given images for target results. - original_size (torch.Tensor): Original image size. + images (Tensor): Given images for target results. + original_size (Tensor): Original image size. Returns: - (List[List[DefaultDict[int, List[torch.Tensor]]]]): Target results. + (List[List[DefaultDict[int, List[Tensor]]]]): Target results. Lists wrapping results is following this order: 1. Target images 2. 
Tuple of predicted masks and used points gotten by point selection @@ -364,10 +360,10 @@ def infer( if image.ndim == 3: image = image.unsqueeze(0) - image_embeddings = self.image_encoder(images) + image_embedding = self.image_encoder(images) total_points_scores, total_bg_coords = self.prompt_getter( - image_embeddings=image_embeddings, original_size=original_size + image_embedding=image_embedding, original_size=original_size ) predicted_masks: defaultdict = defaultdict(list) used_points: defaultdict = defaultdict(list) @@ -389,11 +385,11 @@ def infer( point_coords = ResizeLongestSide.apply_coords( point_coords, original_size[0], self.config.model.image_size ) - point_labels = torch.tensor( + point_labels = torch.as_tensor( [1] + [0] * len(bg_coords), dtype=torch.float32, device=self.device ).unsqueeze(0) mask = self._predict_masks( - image_embeddings=image_embeddings, + image_embeddings=image_embedding, point_coords=point_coords, point_labels=point_labels, original_size=original_size, @@ -408,11 +404,11 @@ def infer( def __inspect_overlapping_areas( self, - predicted_masks: Dict[int, List[torch.Tensor]], - used_points: Dict[int, List[torch.Tensor]], + predicted_masks: Dict[int, List[Tensor]], + used_points: Dict[int, List[Tensor]], threshold_iou: float = 0.8, ): - def __calculate_mask_iou(mask1: torch.Tensor, mask2: torch.Tensor): + def __calculate_mask_iou(mask1: Tensor, mask2: Tensor): assert mask1.ndim == 2 and mask2.ndim == 2 intersection = torch.logical_and(mask1, mask2).sum().item() union = torch.logical_or(mask1, mask2).sum().item() @@ -446,15 +442,15 @@ def __calculate_mask_iou(mask1: torch.Tensor, mask2: torch.Tensor): def _predict_masks( self, - image_embeddings: torch.Tensor, - point_coords: torch.Tensor, - point_labels: torch.Tensor, - original_size: torch.Tensor, + image_embeddings: Tensor, + point_coords: Tensor, + point_labels: Tensor, + original_size: Tensor, is_cascade: bool = True, - ) -> torch.Tensor: + ) -> Tensor: """Predict target masks.""" - logits: torch.Tensor - scores: torch.Tensor + logits: Tensor + scores: Tensor for i in range(3): if i == 0: # First-step prediction @@ -479,7 +475,7 @@ def _predict_masks( coords = torch.nonzero(masks) y, x = coords[:, 0], coords[:, 1] box_coords = ResizeLongestSide.apply_coords( - torch.tensor([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device), + torch.as_tensor([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device), original_size[0], self.config.model.image_size, ) @@ -513,7 +509,6 @@ def training_step(self, batch, batch_idx) -> None: self.learn( images=batch["images"], processed_prompts=processed_prompts, - padding=batch.get("padding")[0], original_size=batch.get("original_size")[0], ) @@ -524,24 +519,24 @@ def predict_step(self, batch, batch_idx): def _preprocess_prompts( self, - bboxes: Optional[torch.Tensor] = None, - points: Optional[torch.Tensor] = None, - annotations: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - ) -> Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]: + bboxes: Optional[Tensor] = None, + points: Optional[Tensor] = None, + annotations: Optional[Tensor] = None, + labels: Optional[Tensor] = None, + ) -> Dict[ScoredLabel, List[Dict[str, Tensor]]]: """Preprocess prompts. Currently, preprocessing for bounding boxes is only supported. Args: - bboxes (torch.Tensor, optional): Bounding box prompts to be preprocessed. - points (torch.Tensor, optional): Point prompts to be preprocessed, to be supported. 
- annotations (torch.Tensor, optional): annotation prompts to be preprocessed, to be supported. - labels (torch.Tensor, optional): Assigned labels according to given prompts. + bboxes (Tensor, optional): Bounding box prompts to be preprocessed. + points (Tensor, optional): Point prompts to be preprocessed, to be supported. + annotations (Tensor, optional): annotation prompts to be preprocessed, to be supported. + labels (Tensor, optional): Assigned labels according to given prompts. Currently, it is only matched to bboxes, and it will be deprecated. Returns: - (defaultdict[ScoredLabel, List[Dict[str, torch.Tensor]]]): Processed and arranged each single prompt + (defaultdict[ScoredLabel, List[Dict[str, Tensor]]]): Processed and arranged each single prompt using label information as keys. Unlike other prompts, `annotation` prompts will be aggregated as single annotation. """ @@ -562,39 +557,31 @@ def _preprocess_prompts( def _generate_masked_features( self, - feats: torch.Tensor, - masks: torch.Tensor, + feats: Tensor, + masks: Tensor, threshold_mask: float, - padding: Optional[Union[Tuple[int, ...], torch.Tensor]] = None, - ) -> Tuple[torch.Tensor, ...]: + ) -> Tuple[Tensor, ...]: """Generate masked features. Args: - feats (torch.Tensor): Raw reference features. It will be filtered with masks. - masks (torch.Tensor): Reference masks used to filter features. + feats (Tensor): Raw reference features. It will be filtered with masks. + masks (Tensor): Reference masks used to filter features. threshold_mask (float): Threshold to control masked region. - padding (Union[Tuple[int, ...], torch.Tensor], optional): Padding size. Returns: - (torch.Tensor): Masked features. + (Tensor): Masked features. """ - if padding: - resized_size = ( - self.config.model.image_size - padding[1] - padding[3], - self.config.model.image_size - padding[0] - padding[2], - ) - else: - resized_size = (self.config.model.image_size, self.config.model.image_size) + scale_factor = self.config.model.image_size / max(masks.shape) # Post-process masks - masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), size=resized_size, mode="bilinear").squeeze() - masks = self._preprocess_masks(masks) + masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), scale_factor=scale_factor, mode="bilinear").squeeze() + masks = self._pad_to_square(masks) masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), size=feats.shape[0:2], mode="bilinear").squeeze() # Target feature extraction if (masks > threshold_mask).sum() == 0: # (for stability) there is no area to be extracted - return None, None + return None masked_feat = feats[masks > threshold_mask] masked_feat = masked_feat.mean(0).unsqueeze(0) @@ -602,16 +589,15 @@ def _generate_masked_features( return masked_feat - def _preprocess_masks(self, x: torch.Tensor) -> torch.Tensor: - """Normalize pixel values and pad to a square input. + def _pad_to_square(self, x: Tensor) -> Tensor: + """Pad to a square input. Args: - x (torch.Tensor): Mask to be padded. + x (Tensor): Mask to be padded. Returns: - (torch.Tensor): Padded mask. + (Tensor): Padded mask. 
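Padding is added on the right and bottom so the output is image_size x image_size.
         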
""" - # Pad h, w = x.shape[-2:] padh = self.config.model.image_size - h padw = self.config.model.image_size - w @@ -620,9 +606,9 @@ def _preprocess_masks(self, x: torch.Tensor) -> torch.Tensor: def _postprocess_masks( self, - masks: torch.Tensor, - logits: torch.Tensor, - scores: torch.Tensor, + masks: Tensor, + logits: Tensor, + scores: Tensor, is_single: bool = False, ): """Post-process masks for cascaded post-refinements.""" @@ -645,13 +631,13 @@ def _postprocess_masks( best_idx = torch.argmax(scores[0]) return logits[:, best_idx], masks[0, best_idx] - def _update_value(self, target: Dict[str, Any], key: str, value: torch.Tensor) -> None: + def _update_value(self, target: Dict[str, Any], key: str, value: Tensor) -> None: """Update tensor to target dictionary. Args: target (Dict[str, Any]): Target dictionary to be updated. key (str): Key to be used for update. - value (torch.Tensor): Value to be used for update. + value (Tensor): Value to be used for update. """ if key in target: target[key] = torch.cat((target[key], value)) @@ -661,24 +647,24 @@ def _update_value(self, target: Dict[str, Any], key: str, value: torch.Tensor) - def _merge_prompts( self, label: ScoredLabel, - input_prompts: Dict[str, torch.Tensor], - processed_prompts: Dict[ScoredLabel, List[Dict[str, torch.Tensor]]], + input_prompts: Dict[str, Tensor], + processed_prompts: Dict[ScoredLabel, List[Dict[str, Tensor]]], use_only_background: bool = True, - ) -> Dict[str, torch.Tensor]: + ) -> Dict[str, Tensor]: """Merge target prompt and other prompts. Merge a foreground prompt and other prompts (background or prompts with other classes). Args: label (ScoredLabel): Label information. Background is 0 and other foregrounds are >= 0. - input_prompts (Dict[str, torch.Tensor]): A foreground prompt to be merged with other prompts. - processed_prompts (Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]): The whole class-wise prompts + input_prompts (Dict[str, Tensor]): A foreground prompt to be merged with other prompts. + processed_prompts (Dict[ScoredLabel, List[Dict[str, Tensor]]]): The whole class-wise prompts processed at _preprocess_prompts. use_only_background (bool): Whether merging only background prompt, defaults to True. It is applied to only point_coords. Returns: - (Dict[str, torch.Tensor]): Merged prompts. + (Dict[str, Tensor]): Merged prompts. 
""" merged_input_prompts = deepcopy(input_prompts) for other_label, other_input_prompts in processed_prompts.items(): diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index 8bff9bd55f7..aca6039d678 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -643,12 +643,16 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): elif module == "visual_prompting_prompt_getter": dummy_inputs = { "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float32), + "reference_feats": torch.randn(1, 1, 256, dtype=torch.float32), + "used_indices": torch.as_tensor([[0]], dtype=torch.int64), "original_size": torch.randint(low=0, high=image_size * 2, size=(1, 2), dtype=torch.int64), "threshold": torch.tensor([[0.1]], dtype=torch.float32), "num_bg_points": torch.randint(low=1, high=image_size, size=(1, 1), dtype=torch.int64), } output_names = ["total_points_scores", "total_bg_coords"] dynamic_axes = { + "reference_feats": {0: "num_labels"}, + "used_indices": {1: "num_labels"}, "total_points_scores": {0: "num_labels", 1: "num_points"}, "total_bg_coords": {0: "num_labels", 1: "num_points"}, } @@ -706,13 +710,6 @@ def save_model(self, output_model: ModelEntity) -> None: logger.info("Saving the model weights and reference features.") model_info = self.model.state_dict() - # TODO (sungchul): is there more efficient way not to manually add properties? - model_info.update( - { - "prompt_getter.reference_feats": self.model.prompt_getter.reference_feats, - "prompt_getter.reference_prompts": self.model.prompt_getter.reference_prompts, - } - ) buffer = io.BytesIO() torch.save(model_info, buffer) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py index 36225af9d4a..a703c7512c6 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py @@ -31,7 +31,6 @@ def test_collate_fn(): "points": [], "gt_masks": [Tensor([1, 2, 3])], "original_size": np.array([1, 3]), - "padding": [], "path": [], "labels": [], }, @@ -42,7 +41,6 @@ def test_collate_fn(): "points": [], "gt_masks": [Tensor([4, 5, 6])], "original_size": np.array([1, 3]), - "padding": [], "path": [], "labels": [], }, @@ -56,7 +54,6 @@ def test_collate_fn(): "original_size": [Tensor([1, 3]), Tensor([1, 3])], "path": [[], []], "labels": [[], []], - "padding": [[], []], } results = collate_fn(batch) @@ -73,7 +70,6 @@ def test_collate_fn(): assert torch.all(r == e) assert results["path"] == expected["path"] assert results["labels"] == expected["labels"] - assert results["padding"] == expected["padding"] class TestPad: @@ -88,22 +84,21 @@ class TestPad: bboxes=[[1, 1, 3, 3]], points=[[1, 1, 2, 2]], ), - ((0, 0, 0, 2), (3, 6, 6), [(4, 6)], [[1, 1, 3, 3]], [[1, 1, 2, 2]]), + ((3, 6, 6), [(4, 6)], [[1, 1, 3, 3]], [[1, 1, 2, 2]]), ), ( dict(images=torch.zeros((3, 4, 6)), gt_masks=[torch.zeros((4, 6))], bboxes=[[1, 1, 3, 3]], points=None), - ((0, 0, 0, 2), (3, 6, 6), [(4, 6)], [[1, 1, 3, 3]], None), + ((3, 6, 6), [(4, 6)], [[1, 1, 3, 3]], None), ), ], ) def test_call(self, item: Dict[str, Any], expected: Tuple[Any]): """Test __call__.""" pad_transform = Pad() - expected_padding, 
expected_images_shape, expected_gt_masks_shape, expected_bboxes, expected_points = expected + expected_images_shape, expected_gt_masks_shape, expected_bboxes, expected_points = expected result = pad_transform(item) - assert result["padding"] == expected_padding assert result["images"].shape == expected_images_shape assert len(result["gt_masks"]) == len(expected_gt_masks_shape) assert all(gt_mask.shape == shape for gt_mask, shape in zip(result["gt_masks"], expected_gt_masks_shape)) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 8d145bfcaaa..a3478ccb59a 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -9,6 +9,7 @@ from collections import OrderedDict from tests.test_suite.e2e_test_system import e2e_pytest_unit import torch +from torch import nn from omegaconf import DictConfig from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything import ( @@ -31,12 +32,6 @@ class TestPromptGetter: def setup(self) -> None: self.prompt_getter = PromptGetter(image_size=3, downsizing=1) - @e2e_pytest_unit - def test_initialize(self) -> None: - """Test initialize.""" - assert not self.prompt_getter.reference_feats - assert not self.prompt_getter.reference_prompts - @e2e_pytest_unit def test_set_default_thresholds(self) -> None: """Test set_default_thresholds.""" @@ -48,31 +43,6 @@ def test_set_default_thresholds(self) -> None: assert self.prompt_getter.default_threshold_reference == 0.5 assert self.prompt_getter.default_threshold_target == 0.7 - @e2e_pytest_unit - def test_set_reference(self) -> None: - """Test set_reference.""" - self.prompt_getter.set_reference( - label=MockScoredLabel(label=1), - reference_feats=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), - reference_prompts=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), - ) - - assert self.prompt_getter.reference_feats[0].sum() == 0 - assert self.prompt_getter.reference_prompts[0].sum() == 0 - assert self.prompt_getter.reference_feats[1].sum() == 9 - assert self.prompt_getter.reference_prompts[1].sum() == 9 - - self.prompt_getter.set_reference( - label=MockScoredLabel(label=3), - reference_feats=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), - reference_prompts=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), - ) - - assert self.prompt_getter.reference_feats[2].sum() == 0 - assert self.prompt_getter.reference_prompts[2].sum() == 0 - assert self.prompt_getter.reference_feats[3].sum() == 9 - assert self.prompt_getter.reference_prompts[3].sum() == 9 - @e2e_pytest_unit @pytest.mark.parametrize("result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])]) def test_forward(self, mocker, result_point_selection: torch.Tensor) -> None: @@ -81,12 +51,13 @@ def test_forward(self, mocker, result_point_selection: torch.Tensor) -> None: self.prompt_getter, "get_prompt_candidates", return_value=(result_point_selection, torch.zeros(1, 2))) - image_embeddings = torch.ones(1, 4, 4, 4) - 
self.prompt_getter.reference_feats = torch.rand(1, 1, 4) + image_embedding = torch.ones(1, 4, 4, 4) + reference_feats = torch.rand(1, 1, 4) + used_indices = torch.as_tensor([[0]]) original_size = torch.tensor((self.prompt_getter.image_size, self.prompt_getter.image_size), dtype=torch.int64) total_points_scores, total_bg_coords = self.prompt_getter( - image_embeddings=image_embeddings, original_size=original_size + image_embedding=image_embedding, reference_feats=reference_feats, used_indices=used_indices, original_size=original_size ) assert total_points_scores.shape[0] == 1 @@ -100,15 +71,14 @@ def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tenso "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" ) mocker.patch.object(self.prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2))) - image_embeddings = torch.ones(1, 4, 4, 4) - self.prompt_getter.reference_feats = torch.rand(1, 1, 4) - label = torch.tensor([[0]], dtype=torch.int64) + image_embedding = torch.ones(1, 4, 4, 4) + reference_feat = torch.rand(1, 4) original_size = torch.tensor( [[self.prompt_getter.image_size, self.prompt_getter.image_size]], dtype=torch.int64 ) points_scores, bg_coords = self.prompt_getter.get_prompt_candidates( - image_embeddings=image_embeddings, label=label, original_size=original_size + image_embedding=image_embedding, reference_feat=reference_feat, original_size=original_size ) assert torch.all(points_scores == result_point_selection) @@ -154,13 +124,18 @@ def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): [ None, { - "prompt_getter.reference_feats": "prompt_getter.reference_feats", - "prompt_getter.reference_prompts": "prompt_getter.reference_prompts", + "reference_info": nn.ParameterDict({ + "reference_feats": "reference_feats", + "used_indices": "used_indices"}) }, ], ) - def test_init(self, set_zero_shot_segment_anything, state_dict: Dict[str, Any]) -> None: + def test_init(self, set_zero_shot_segment_anything, state_dict: Optional[Dict[str, Any]]) -> None: """Test __init__.""" + if state_dict is not None: + zero_shot_segment_anything_for_init_weights = set_zero_shot_segment_anything() + state_dict.update(zero_shot_segment_anything_for_init_weights.state_dict()) + zero_shot_segment_anything = set_zero_shot_segment_anything(state_dict=state_dict) assert zero_shot_segment_anything.config.model.freeze_image_encoder @@ -168,8 +143,8 @@ def test_init(self, set_zero_shot_segment_anything, state_dict: Dict[str, Any]) assert zero_shot_segment_anything.config.model.freeze_mask_decoder if state_dict: - zero_shot_segment_anything.prompt_getter.reference_feats = "prompt_getter.reference_feats" - zero_shot_segment_anything.prompt_getter.reference_prompts = "prompt_getter.reference_prompts" + zero_shot_segment_anything.reference_info.reference_feats = "reference_feats" + zero_shot_segment_anything.reference_info.used_indices = "used_indices" @e2e_pytest_unit def test_set_default_config(self, set_zero_shot_segment_anything) -> None: @@ -197,19 +172,19 @@ def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: mocker.patch.object( zero_shot_segment_anything, "_predict_masks", - return_value=torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + return_value=torch.tensor([[[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]]]), ) + mocker.patch.object(zero_shot_segment_anything, "_generate_masked_features", return_value=torch.ones(1, 
256)) - processed_prompts = {MockScoredLabel(label=1, name="label"): [{"box": torch.tensor([[0, 0, 1, 1]])}]} + processed_prompts = {MockScoredLabel(label=0, name="label"): [{"box": torch.tensor([[0, 0, 1, 1]])}]} zero_shot_segment_anything.learn( images=torch.ones((1, 3, 4, 4)), processed_prompts=processed_prompts, - padding=(0, 0, 0, 0), original_size=torch.tensor((4, 4)), ) - assert zero_shot_segment_anything.prompt_getter.reference_feats.shape == (2, 1, 3) - assert zero_shot_segment_anything.prompt_getter.reference_prompts.shape == (2, 4, 4) + assert zero_shot_segment_anything.reference_info.reference_feats.shape == (1, 1, 256) + assert zero_shot_segment_anything.reference_info.used_indices == torch.as_tensor([0]) @e2e_pytest_unit @pytest.mark.parametrize("expected", [[torch.ones((4, 4)) / 2, torch.tensor([0.0, 0.0, 0.5])]]) @@ -221,8 +196,8 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect ) zero_shot_segment_anything = set_zero_shot_segment_anything() - zero_shot_segment_anything.prompt_getter.reference_feats = torch.rand(1, 1, 4) - zero_shot_segment_anything.prompt_getter.reference_prompts = torch.zeros((4, 4)) + zero_shot_segment_anything.reference_info.reference_feats = torch.rand(1, 1, 256) + zero_shot_segment_anything.reference_info.used_indices = {0} mocker.patch.object( SegmentAnything, "forward", return_value=(torch.ones(1, 4, 4, 4), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) ) @@ -286,12 +261,12 @@ def test_generate_masked_features(self, set_zero_shot_segment_anything) -> None: assert masked_feat.shape == (1, 1) @e2e_pytest_unit - def test_preprocess_masks(self, set_zero_shot_segment_anything) -> None: - """Test _preprocess_masks.""" + def test_pad_to_square(self, set_zero_shot_segment_anything) -> None: + """Test _pad_to_square.""" zero_shot_segment_anything = set_zero_shot_segment_anything() zero_shot_segment_anything.config.model.image_size = 16 - result = zero_shot_segment_anything._preprocess_masks(x=torch.ones(1, 1, 8, 8)) + result = zero_shot_segment_anything._pad_to_square(x=torch.ones(1, 1, 8, 8)) assert result[:8, :8].sum() == 8**2 assert result[:8, 8:].sum() == 0 diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index 051ddcdfe8f..d2a5852e0af 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -379,10 +379,9 @@ def test_save_model(self, mocker): mocker_otx_model = mocker.patch("otx.api.entities.model.ModelEntity") mocker_io_bytes_io = mocker.patch("io.BytesIO") mocker_torch_save = mocker.patch("torch.save") + mocker.patch.object(self.zero_shot_task.model, "state_dict", return_value={"reference_info.reference_feats": None}) - self.zero_shot_task.model.prompt_getter = mocker.MagicMock() - self.zero_shot_task.model.prompt_getter.reference_feats.return_value = "reference_feats" - self.zero_shot_task.model.prompt_getter.reference_prompts.return_value = "reference_prompts" + self.zero_shot_task.model.reference_info = "reference_info" self.zero_shot_task.save_model(mocker_otx_model) diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index fb585e22a87..ac16b4cea4e 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -185,6 +185,7 @@ class MockScoredLabel: def __init__(self, label: int, 
name: str = "background"): self.name = name self.id_ = label + self.id = label class MockPromptGetter(nn.Module): From e661f0aae45544381cb19ed6e79d3d4b4508efc6 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 13 Feb 2024 13:52:57 +0900 Subject: [PATCH 06/28] Update `infer` pipeline --- .../zero_shot_segment_anything.py | 69 +++++++--- .../test_zero_shot_segment_anything.py | 128 ++++++++++++++++-- 2 files changed, 171 insertions(+), 26 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 1f26ae73b28..8330b312b22 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -221,7 +221,7 @@ def set_default_config(self) -> DictConfig: def set_empty_reference_info(self) -> None: """Set empty reference information.""" reference_feats: Parameter = Parameter(torch.as_tensor([], dtype=torch.float32), requires_grad=False) - used_indices: Parameter = Parameter(torch.as_tensor([], dtype=torch.int64), requires_grad=False) + used_indices: Parameter = Parameter(torch.as_tensor([[]], dtype=torch.int64), requires_grad=False) self.reference_info = ParameterDict( { "reference_feats": reference_feats, @@ -233,7 +233,7 @@ def set_empty_reference_info(self) -> None: def initialize_reference_info(self, largest_label: int) -> None: """Initialize reference information.""" self.reference_info["reference_feats"] = Parameter(torch.zeros(largest_label + 1, 1, 256), requires_grad=False) - self.reference_info["used_indices"] = Parameter(torch.as_tensor([], dtype=torch.int64), requires_grad=False) + self.reference_info["used_indices"] = Parameter(torch.as_tensor([[]], dtype=torch.int64), requires_grad=False) self.is_reference_info_empty = False def expand_reference_info(self, new_largest_label: int) -> None: @@ -328,16 +328,20 @@ def learn( default_threshold_reference -= 0.05 self.reference_info["reference_feats"][int(label.id)] = ref_feat.detach().cpu() - self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([int(label.id)]))), requires_grad=False) + self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([[int(label.id)]])), dim=1), requires_grad=False) ref_masks[int(label.id)] = ref_mask.detach().cpu() - self.reference_info["used_indices"] = Parameter(self.reference_info["used_indices"].unsqueeze(0), requires_grad=False) if return_outputs: return self.reference_info, ref_masks @torch.no_grad() def infer( - self, images: Tensor, original_size: Tensor + self, + images: Tensor, + reference_feats: Tensor, + used_indices: Tensor, + original_size: Tensor, + is_cascade: bool = False, ) -> List[List[DefaultDict[int, List[Tensor]]]]: """Zero-shot inference with reference features. @@ -345,7 +349,10 @@ def infer( Args: images (Tensor): Given images for target results. + reference_feats (Tensor): Reference features for target prediction. + used_indices (Tensor): To check which indices of reference features are validate. original_size (Tensor): Original image size. + is_cascade (bool): Whether use cascade inference. Defaults to False. Returns: (List[List[DefaultDict[int, List[Tensor]]]]): Target results. 
@@ -363,7 +370,10 @@ def infer( image_embedding = self.image_encoder(images) total_points_scores, total_bg_coords = self.prompt_getter( - image_embedding=image_embedding, original_size=original_size + image_embedding=image_embedding, + reference_feats=reference_feats, + used_indices=used_indices, + original_size=original_size ) predicted_masks: defaultdict = defaultdict(list) used_points: defaultdict = defaultdict(list) @@ -382,9 +392,7 @@ def infer( continue point_coords = torch.cat((points_score[:2].unsqueeze(0), bg_coords), dim=0).unsqueeze(0) - point_coords = ResizeLongestSide.apply_coords( - point_coords, original_size[0], self.config.model.image_size - ) + point_coords = self._preprocess_coords(point_coords, original_size[0], self.config.model.image_size) point_labels = torch.as_tensor( [1] + [0] * len(bg_coords), dtype=torch.float32, device=self.device ).unsqueeze(0) @@ -393,22 +401,23 @@ def infer( point_coords=point_coords, point_labels=point_labels, original_size=original_size, + is_cascade=is_cascade, ) predicted_masks[label].append((mask * points_score[2]).detach().cpu()) used_points[label].append(points_score.detach().cpu()) # check overlapping area between different label masks - self.__inspect_overlapping_areas(predicted_masks, used_points) + self._inspect_overlapping_areas(predicted_masks, used_points) total_results.append([predicted_masks, used_points]) return total_results - def __inspect_overlapping_areas( + def _inspect_overlapping_areas( self, predicted_masks: Dict[int, List[Tensor]], used_points: Dict[int, List[Tensor]], threshold_iou: float = 0.8, ): - def __calculate_mask_iou(mask1: Tensor, mask2: Tensor): + def _calculate_mask_iou(mask1: Tensor, mask2: Tensor): assert mask1.ndim == 2 and mask2.ndim == 2 intersection = torch.logical_and(mask1, mask2).sum().item() union = torch.logical_or(mask1, mask2).sum().item() @@ -426,17 +435,17 @@ def __calculate_mask_iou(mask1: Tensor, mask2: Tensor): overlapped_label = [] overlapped_other_label = [] for (im, mask), (jm, other_mask) in product(enumerate(masks), enumerate(other_masks)): - if __calculate_mask_iou(mask, other_mask) > threshold_iou: + if _calculate_mask_iou(mask, other_mask) > threshold_iou: if used_points[label][im][2] > used_points[other_label][jm][2]: overlapped_other_label.append(jm) else: overlapped_label.append(im) - for im in overlapped_label[::-1]: + for im in sorted(list(set(overlapped_label)), reverse=True): masks.pop(im) used_points[label].pop(im) - for jm in overlapped_other_label[::-1]: + for jm in sorted(list(set(overlapped_other_label)), reverse=True): other_masks.pop(jm) used_points[other_label].pop(jm) @@ -514,7 +523,11 @@ def training_step(self, batch, batch_idx) -> None: def predict_step(self, batch, batch_idx): """Predict step for `infer`.""" - results = self.infer(images=batch["images"], original_size=batch.get("original_size")[0].unsqueeze(0)) + results = self.infer( + images=batch["images"], + reference_feats=self.reference_info["reference_feats"], + used_indices=self.reference_info["used_indices"], + original_size=batch.get("original_size")[0].unsqueeze(0)) return [result[0] for result in results] # tmp: only mask def _preprocess_prompts( @@ -554,6 +567,30 @@ def _preprocess_prompts( processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x[0].id_)) # type: ignore[assignment] return processed_prompts + + def _preprocess_coords( + self, + coords: Tensor, + ori_shape: Union[list[int], tuple[int, int], Tensor], + target_length: int, + ) -> Tensor: + """Expects a torch tensor 
of length 2 in the final dimension. + + Requires the original image size in (H, W) format. + + Args: + coords (Tensor): Coordinates tensor. + ori_shape (Union[list[int], tuple[int, int], Tensor]): Original size of image. + target_length (int): The length of the longest side of the image. + + Returns: + (Tensor): Resized coordinates. + """ + old_h, old_w = ori_shape + new_h, new_w = self.get_prepadded_size(ori_shape, target_length) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords def _generate_masked_features( self, diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index a3478ccb59a..58fa1554ed7 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -124,17 +124,17 @@ def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): [ None, { - "reference_info": nn.ParameterDict({ - "reference_feats": "reference_feats", - "used_indices": "used_indices"}) + "reference_info.reference_feats": torch.zeros(1), + "reference_info.used_indices": torch.zeros(1, dtype=torch.int64), }, ], ) def test_init(self, set_zero_shot_segment_anything, state_dict: Optional[Dict[str, Any]]) -> None: """Test __init__.""" if state_dict is not None: - zero_shot_segment_anything_for_init_weights = set_zero_shot_segment_anything() - state_dict.update(zero_shot_segment_anything_for_init_weights.state_dict()) + zero_shot_segment_anything_for_init_weights = set_zero_shot_segment_anything().state_dict() + zero_shot_segment_anything_for_init_weights.update(state_dict) + state_dict = zero_shot_segment_anything_for_init_weights zero_shot_segment_anything = set_zero_shot_segment_anything(state_dict=state_dict) @@ -143,8 +143,11 @@ def test_init(self, set_zero_shot_segment_anything, state_dict: Optional[Dict[st assert zero_shot_segment_anything.config.model.freeze_mask_decoder if state_dict: - zero_shot_segment_anything.reference_info.reference_feats = "reference_feats" - zero_shot_segment_anything.reference_info.used_indices = "used_indices" + assert zero_shot_segment_anything.reference_info.reference_feats == torch.zeros(1) + assert zero_shot_segment_anything.reference_info.used_indices == torch.zeros(1, dtype=torch.int64) + + assert zero_shot_segment_anything.reference_info.reference_feats.dtype == torch.float32 + assert zero_shot_segment_anything.reference_info.used_indices.dtype == torch.int64 @e2e_pytest_unit def test_set_default_config(self, set_zero_shot_segment_anything) -> None: @@ -196,19 +199,124 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect ) zero_shot_segment_anything = set_zero_shot_segment_anything() - zero_shot_segment_anything.reference_info.reference_feats = torch.rand(1, 1, 256) - zero_shot_segment_anything.reference_info.used_indices = {0} + reference_feats = nn.Parameter(torch.rand(1, 1, 256), requires_grad=False) + used_indices = nn.Parameter(torch.as_tensor([[0]], dtype=torch.int64), requires_grad=False) mocker.patch.object( SegmentAnything, "forward", return_value=(torch.ones(1, 4, 4, 4), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) ) 
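# the stored reference features and their used indices are passed to infer() explicitly
         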
total_results = zero_shot_segment_anything.infer( - images=torch.ones((1, 3, 4, 4)), original_size=torch.tensor([[4, 4]], dtype=torch.int64) + images=torch.ones((1, 3, 4, 4)), + reference_feats=reference_feats, + used_indices=used_indices, + original_size=torch.tensor([[4, 4]], dtype=torch.int64) ) for i, results in enumerate(total_results[0]): for _, result in results.items(): assert torch.equal(result[0], expected[i]) + + @e2e_pytest_unit + def test_inspect_overlapping_areas(self, mocker, set_zero_shot_segment_anything) -> None: + """Test _inspect_overlapping_areas.""" + mocker.patch("otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_checkpoint") + zero_shot_segment_anything = set_zero_shot_segment_anything() + predicted_masks = { + 0: [ + torch.tensor( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + torch.tensor( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + torch.tensor( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + ], + ), + ], + 1: [ + torch.tensor( + [ + [0, 0, 0, 1, 1, 0], + [0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + torch.tensor( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + ], + ), + torch.tensor( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + ], + ), + torch.tensor( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + ], + } + used_points = { + 0: [ + torch.tensor([0, 0, 0.5]), # to be removed + torch.tensor([2, 2, 0.5]), + torch.tensor([1, 4, 0.5]), + ], + 1: [ + torch.tensor([3, 0, 0.5]), + torch.tensor([4, 4, 0.5]), + torch.tensor([1, 4, 0.3]), # to be removed + torch.tensor([0, 0, 0.7]), + ], + } + + zero_shot_segment_anything._inspect_overlapping_areas(predicted_masks, used_points, threshold_iou=0.5) + + assert len(predicted_masks[0]) == 2 + assert len(predicted_masks[1]) == 3 + assert all(torch.tensor([2, 2, 0.5]) == used_points[0][0]) + assert all(torch.tensor([0, 0, 0.7]) == used_points[1][2]) @e2e_pytest_unit def test_predict_masks(self, mocker, set_zero_shot_segment_anything) -> None: From a6add5d58c65578d7a35cff088f0cf4ae639615c Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 13 Feb 2024 16:35:19 +0900 Subject: [PATCH 07/28] [WIP] Update for export --- .../model_wrappers/openvino_models.py | 15 ++++++++++++- .../zero_shot_segment_anything.py | 21 ++++++++++--------- .../visual_prompting/tasks/inference.py | 2 +- 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 432d0010e09..2ed3bcec381 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -17,7 +17,6 @@ from copy import deepcopy from typing import Any, Dict, List, 
Optional, Tuple, Union -import cv2 import numpy as np from openvino.model_api.adapters.inference_adapter import InferenceAdapter from openvino.model_api.models import ImageModel, SegmentationModel @@ -71,6 +70,20 @@ def parameters(cls) -> Dict[str, Any]: # noqa: D102 parameters.update({"sim_threshold": NumericalValue(value_type=float, default_value=0.5, min=0, max=1)}) parameters.update({"num_bg_points": NumericalValue(value_type=int, default_value=1, min=0, max=1024)}) return parameters + + def _get_inputs(self): + """Defines the model inputs for images and additional info.""" + image_blob_names, image_info_blob_names = [], [] + for name, metadata in self.inputs.items(): + if len(metadata.shape) == 4: + image_blob_names.append(name) + else: + image_info_blob_names.append(name) + if not image_blob_names: + self.raise_error( + "Failed to identify the input for the image: no 4D input layer found" + ) + return image_blob_names, image_info_blob_names class Decoder(SegmentationModel): diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 8330b312b22..d85a33b5359 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 from collections import OrderedDict, defaultdict +import os from copy import deepcopy from itertools import product from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union @@ -54,7 +55,8 @@ def forward( ) -> Tuple[Tensor, Tensor]: """Get prompt candidates.""" device = image_embedding.device - threshold = threshold.to(device) + threshold = threshold.squeeze().to(device) + num_bg_points = num_bg_points.squeeze() total_points_scores: Tensor = torch.zeros(used_indices.max() + 1, 0, 3, device=device) total_bg_coords: Tensor = torch.zeros(used_indices.max() + 1, num_bg_points, 2, device=device) @@ -85,12 +87,12 @@ def get_prompt_candidates( image_embedding: Tensor, reference_feat: Tensor, original_size: Tensor, - threshold: Tensor = torch.as_tensor([[0.0]], dtype=torch.float32), - num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), + threshold: Union[Tensor, float] = 0., + num_bg_points: Union[Tensor, int] = 1, device: Union[torch.device, str] = torch.device("cpu"), ) -> Tuple[Tensor, Tensor]: """Get prompt candidates from given reference and target features.""" - assert original_size.dim() == 2 and threshold.dim() == 2 and num_bg_points.dim() == 2 + assert original_size.dim() == 2 target_feat = image_embedding.squeeze() c_feat, h_feat, w_feat = target_feat.shape @@ -115,14 +117,14 @@ def _point_selection( self, mask_sim: Tensor, original_size: Tensor, - threshold: Tensor, - num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), + threshold: Union[Tensor, float] = 0., + num_bg_points: Union[Tensor, int] = 1, ) -> Tuple[Tensor, Tensor]: """Select point used as point prompts.""" _, w_sim = mask_sim.shape # Top-last point selection - bg_indices = mask_sim.flatten().topk(num_bg_points[0, 0], largest=False)[1] + bg_indices = mask_sim.flatten().topk(num_bg_points, largest=False)[1] bg_x = (bg_indices // w_sim).unsqueeze(0) bg_y = bg_indices - bg_x * w_sim bg_coords = torch.cat((bg_y, bg_x), dim=0).permute(1, 
0) @@ -483,11 +485,10 @@ def _predict_masks( has_mask_input = self.has_mask_inputs[1].to(self.device) coords = torch.nonzero(masks) y, x = coords[:, 0], coords[:, 1] - box_coords = ResizeLongestSide.apply_coords( + box_coords = self._preprocess_coords( torch.as_tensor([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device), original_size[0], - self.config.model.image_size, - ) + self.config.model.image_size) point_coords = torch.cat((point_coords, box_coords), dim=1) point_labels = torch.cat((point_labels, self.point_labels_box.to(self.device)), dim=1) diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index aca6039d678..5e8e6945701 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -646,7 +646,7 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "reference_feats": torch.randn(1, 1, 256, dtype=torch.float32), "used_indices": torch.as_tensor([[0]], dtype=torch.int64), "original_size": torch.randint(low=0, high=image_size * 2, size=(1, 2), dtype=torch.int64), - "threshold": torch.tensor([[0.1]], dtype=torch.float32), + "threshold": torch.as_tensor([[0.1]], dtype=torch.float32), "num_bg_points": torch.randint(low=1, high=image_size, size=(1, 1), dtype=torch.int64), } output_names = ["total_points_scores", "total_bg_coords"] From df28d1a247b0c8d205b71609f0f9cb45c0600568 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 15 Feb 2024 13:19:21 +0900 Subject: [PATCH 08/28] Update data & forward pipeline for multiple prompts --- .../model_wrappers/openvino_models.py | 38 +++--- .../pytorch_lightning/callbacks/inference.py | 2 +- .../pytorch_lightning/datasets/dataset.py | 129 +++++++++++------- .../datasets/pipelines/sam_transforms.py | 7 +- .../datasets/pipelines/transforms.py | 6 +- .../visual_prompters/segment_anything.py | 39 +++--- .../configs/sam_tiny_vit/config.yaml | 2 + .../visual_prompting/tasks/openvino.py | 8 +- .../datasets/pipelines/test_sam_transforms.py | 4 - .../datasets/pipelines/test_transforms.py | 116 +++++++++++----- .../datasets/test_dataset.py | 20 +-- .../visual_prompters/test_segment_anything.py | 8 +- 12 files changed, 229 insertions(+), 150 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 2ed3bcec381..128e92687e4 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -98,6 +98,9 @@ def __init__( preload: bool = False, ): super().__init__(model_adapter, configuration, preload) + + self.mask_input = np.zeros((1, 1, 256, 256), dtype=np.float32) + self.has_mask_input = np.zeros((1, 1), dtype=np.float32) @classmethod def parameters(cls): # noqa: D102 @@ -112,22 +115,25 @@ def _get_outputs(self): def preprocess(self, inputs: Dict[str, Any], meta: Dict[str, Any]) -> List[Dict[str, Any]]: """Preprocess prompts.""" processed_prompts = [] - # TODO (sungchul): process points - for bbox, label in zip(inputs["bboxes"], inputs["labels"]): - # TODO (sungchul): add condition to check whether using bbox or point - point_coords = self._apply_coords(bbox.reshape(-1, 2, 2), inputs["original_size"]) - point_labels = np.array([2, 3], dtype=np.float32).reshape((-1, 2)) - processed_prompts.append( - { - 
"point_coords": point_coords, - "point_labels": point_labels, - # TODO (sungchul): how to generate mask_input and has_mask_input - "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32), - "has_mask_input": np.zeros((1, 1), dtype=np.float32), - "orig_size": np.array(inputs["original_size"], dtype=np.int64).reshape((-1, 2)), - "label": label, - } - ) + for prompt_name in ["bboxes", "points"]: + for prompt, label in zip(inputs.get(prompt_name), inputs["labels"].get(prompt_name, [])): + if prompt_name == "bboxes": + point_coords = self._apply_coords(prompt.reshape(-1, 2, 2), inputs["original_size"]) + point_labels = np.array([2, 3], dtype=np.float32).reshape(-1, 2) + else: + point_coords = self._apply_coords(prompt.reshape(-1, 1, 2), inputs["original_size"]) + point_labels = np.array([1], dtype=np.float32).reshape(-1, 1) + + processed_prompts.append( + { + "point_coords": point_coords, + "point_labels": point_labels, + "mask_input": self.mask_input, + "has_mask_input": self.has_mask_input, + "orig_size": np.asarray(inputs["original_size"], dtype=np.int64).reshape(-1, 2), + "label": label, + } + ) return processed_prompts def _apply_coords(self, coords: np.ndarray, original_size: Union[List[int], Tuple[int, int]]) -> np.ndarray: diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py index 1dc39b7cc3f..16960cb32ad 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py @@ -57,7 +57,7 @@ def on_predict_epoch_end(self, _trainer: Trainer, _pl_module: LightningModule, o for output in outputs[0]: pred_masks.append(output["masks"][0]) iou_predictions.append(output["iou_predictions"][0]) - pred_labels.append(output["labels"][0]) + pred_labels.append(output["labels"][0]["bboxes"] + output["labels"][0]["points"]) for dataset_item, pred_mask, iou_prediction, labels in zip( self.otx_dataset, pred_masks, iou_predictions, pred_labels diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py index 476a2c09d69..82dead10c87 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py @@ -31,6 +31,7 @@ ResizeLongestSide, collate_fn, ) +from collections import defaultdict from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.datasets import DatasetEntity from otx.api.entities.image import Image @@ -130,13 +131,6 @@ def generate_bbox_from_mask(gt_mask: np.ndarray, width: int, height: int) -> Lis return generate_bbox(x_min, y_min, x_max, y_max, width, height) -def generate_point_from_mask(gt_mask: np.ndarray) -> np.ndarray: - """Randomly generate point from given mask.""" - candidates = np.where(gt_mask == 1) - index = np.random.permutation(len(candidates))[0] - return candidates[index] - - class OTXVisualPromptingDataset(Dataset): """Visual Prompting Dataset Adaptor. 
@@ -149,13 +143,32 @@ class OTXVisualPromptingDataset(Dataset): """ def __init__( - self, dataset: DatasetEntity, image_size: int, mean: List[float], std: List[float], offset_bbox: int = 0 + self, + mode: Subset, + dataset: DatasetEntity, + image_size: int, + mean: List[float], + std: List[float], + offset_bbox: int = 0, + use_point: bool = False, + use_bbox: bool = False, ) -> None: - + self.mode = mode self.dataset = dataset self.transform = get_transform(image_size, mean, std) self.offset_bbox = offset_bbox self.labels = dataset.get_labels() + + if not use_bbox and not use_point: + # if both are False, use bbox as default + use_bbox = True + self.prob = 1.0 # if using only bbox prompt + if use_bbox and use_point: + # if using both prompts, divide prob into both + self.prob = 0.5 + if not use_bbox and use_point: + # if using only point prompt + self.prob = 0.0 def __len__(self) -> int: """Get size of the dataset. @@ -166,21 +179,23 @@ def __len__(self) -> int: return len(self.dataset) @staticmethod - def get_prompts(dataset_item: DatasetItemEntity, dataset_labels: List[LabelEntity]) -> Dict[str, Any]: + def get_prompts(dataset_item: DatasetItemEntity, dataset_labels: List[LabelEntity], prob: float = 1., mode: Subset = Subset.TESTING) -> Dict[str, Any]: """Get propmts from dataset_item. Args: dataset_item (DatasetItemEntity): Dataset item entity. dataset_labels (List[LabelEntity]): Label information. + prob (float): Probability of which prompts will be generated. + mode (Subset): To check which mode is used between training, validation, and testing. Returns: Dict[str, Any]: Processed prompts with ground truths. """ width, height = dataset_item.width, dataset_item.height - bboxes: List[List[int]] = [] - points: List = [] # TBD + bboxes: List[np.ndarray] = [] + points: List[np.ndarray] = [] gt_masks: List[np.ndarray] = [] - labels: List[ScoredLabel] = [] + labels: defaultdict[str, List[ScoredLabel]] = defaultdict(list) for annotation in dataset_item.get_annotations(labels=dataset_labels, include_empty=False, preserve_id=True): if isinstance(annotation.shape, Image): # use mask as-is @@ -192,25 +207,36 @@ def get_prompts(dataset_item: DatasetItemEntity, dataset_labels: List[LabelEntit continue if gt_mask.sum() == 0: - # pass no gt + # pass no gt or very small region continue - gt_masks.append(gt_mask) - - # generate bbox based on gt_mask - bbox = generate_bbox_from_mask(gt_mask, width, height) - bboxes.append(bbox) - # TODO (sungchul): generate random points from gt_mask - - # add labels - labels.extend(annotation.get_labels(include_empty=False)) + gt_masks.append(gt_mask) - bboxes = np.array(bboxes) + mask_points = np.nonzero(gt_mask) + if np.random.rand() < prob: + # generate bbox based on gt_mask + bbox = generate_bbox_from_mask(gt_mask, width, height) + bboxes.append(bbox) + labels["bboxes"].extend(annotation.get_labels(include_empty=False)) + else: + # generate point based on gt_mask + if mode == Subset.TRAINING: + # get random point from the mask + idx_chosen = np.random.permutation(len(mask_points[0]))[0] # noqa: NPY002 + point = np.array([mask_points[1][idx_chosen], mask_points[0][idx_chosen]]) + else: + # get averaged point + point = np.array([mask_points[1].mean(), mask_points[0].mean()]) + points.append(point) + labels["points"].extend(annotation.get_labels(include_empty=False)) + + bboxes = np.array(bboxes, dtype=np.float32) if len(bboxes) > 0 else np.zeros((0, 4), dtype=np.float32) + points = np.array(points, dtype=np.float32) if len(points) > 0 else np.zeros((0, 2), 
dtype=np.float32) return dict( original_size=np.array((height, width), dtype=np.int64), gt_masks=gt_masks, bboxes=bboxes, - points=points, # TODO (sungchul): update point information + points=points, labels=labels, ) @@ -226,7 +252,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: dataset_item = self.dataset[index] item: Dict[str, Union[int, Tensor]] = {"index": index, "images": dataset_item.numpy} - prompts = self.get_prompts(dataset_item, self.labels) + prompts = self.get_prompts(dataset_item, self.labels, self.prob, self.mode) if len(prompts["gt_masks"]) == 0: return { "images": [], @@ -238,7 +264,6 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: "labels": [], } - prompts["bboxes"] = np.array(prompts["bboxes"]) item.update({**prompts, "path": dataset_item.media.path}) item = self.transform(item) return item @@ -247,20 +272,6 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: class OTXZeroShotVisualPromptingDataset(OTXVisualPromptingDataset): """Visual Prompting for Zero-shot learning Dataset Adaptor.""" - def __init__( - self, - dataset: DatasetEntity, - image_size: int, - mean: List[float], - std: List[float], - generate_point: bool = False, - generate_bbox: bool = False, - **kwargs, - ) -> None: - super().__init__(dataset, image_size, mean, std, offset_bbox=0) - self.generate_point = generate_point - self.generate_bbox = generate_bbox - def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: """Get dataset item. @@ -273,7 +284,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: dataset_item = self.dataset[index] item: Dict[str, Union[int, Tensor]] = {"index": index, "images": dataset_item.numpy} - prompts = self.get_prompts(dataset_item, self.labels) # , self.generate_point, self.generate_bbox) + prompts = self.get_prompts(dataset_item, self.labels, self.prob) item.update({**prompts, "path": dataset_item.media.path}) item = self.transform(item) return item @@ -314,12 +325,12 @@ def __init__( ) self.config["train_batch_size"] = 1 - self.kwargs.update( - { - "generate_point": self.config.get("generate_point", False), - "generate_bbox": self.config.get("generate_bbox", False), - } - ) + self.kwargs.update( + { + "use_point": self.config.get("use_point", False), + "use_bbox": self.config.get("use_bbox", False), + } + ) self.train_otx_dataset: DatasetEntity self.val_otx_dataset: DatasetEntity @@ -340,6 +351,7 @@ def setup(self, stage: Optional[str] = None) -> None: std = self.config.normalize.std if stage == "fit" or stage is None: self.train_dataset = self.DATASETS[self.train_type]( + mode=Subset.TRAINING, dataset=self.dataset.get_subset(Subset.TRAINING), image_size=image_size, mean=mean, @@ -351,17 +363,32 @@ def setup(self, stage: Optional[str] = None) -> None: # self.val_dataset = None if self.train_type == TrainType.Incremental: self.val_dataset = self.DATASETS[self.train_type]( - dataset=self.dataset.get_subset(Subset.VALIDATION), image_size=image_size, mean=mean, std=std + mode=Subset.VALIDATION, + dataset=self.dataset.get_subset(Subset.VALIDATION), + image_size=image_size, + mean=mean, + std=std, + **self.kwargs, ) if stage == "test": self.test_dataset = self.DATASETS[self.train_type]( - dataset=self.dataset.get_subset(Subset.TESTING), image_size=image_size, mean=mean, std=std + mode=Subset.TESTING, + dataset=self.dataset.get_subset(Subset.TESTING), + image_size=image_size, + mean=mean, + std=std, + **self.kwargs, ) if stage == "predict": 
self.predict_dataset = self.DATASETS[self.train_type]( - dataset=self.dataset, image_size=image_size, mean=mean, std=std, **self.kwargs + mode=Subset.TESTING, + dataset=self.dataset.get_subset(Subset.TESTING), + image_size=image_size, + mean=mean, + std=std, + **self.kwargs ) def summary(self): diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py index 06d04ea817d..33c1fbb9b59 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py @@ -38,8 +38,7 @@ def __call__(self, item: Dict[str, Union[List, Tensor]]) -> Dict[str, Union[List ) item["gt_masks"] = [torch.as_tensor(gt_mask) for gt_mask in item["gt_masks"]] item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"], self.target_length) - if item["points"]: - item["points"] = self.apply_coords(item["points"], item["original_size"], self.target_length) + item["points"] = self.apply_coords(item["points"], item["original_size"], self.target_length) return item @classmethod @@ -78,9 +77,9 @@ def apply_coords( old_h, old_w = original_size new_h, new_w = cls.get_preprocess_shape(original_size[0], original_size[1], target_length) if isinstance(coords, np.ndarray): - coords = coords.astype(float) + coords = coords.astype(np.float32) else: - coords = coords.to(torch.float) + coords = coords.to(torch.float32) coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) return coords diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py index 05b0693a39a..dd1abddf740 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py @@ -34,13 +34,13 @@ def _convert_empty_to_none(x: str, dtype: torch.dtype = torch.float32) -> List: List: List of batch data. 
""" func = torch.stack if x == "gt_masks" else torch.tensor - items = [func(item[x]).to(dtype) for item in batch if item[x] is not None] - return None if len(items) == 0 else items + items = [func(item[x]).to(dtype) if len(item[x]) > 0 else None for item in batch] + return items index = [item["index"] for item in batch] images = torch.stack([item["images"] for item in batch]) bboxes = _convert_empty_to_none("bboxes") - points = None # TBD + points = _convert_empty_to_none("points") gt_masks = _convert_empty_to_none("gt_masks", torch.int32) original_size = _convert_empty_to_none("original_size") path = [item["path"] for item in batch] diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index f6b59922baf..9ac772ce4ad 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -386,23 +386,30 @@ def forward_train( image_embeddings = self.image_encoder(images) pred_masks = [] ious = [] - for embedding, bbox in zip(image_embeddings, bboxes): - sparse_embeddings, dense_embeddings = self.prompt_encoder( - points=points, - boxes=bbox, - masks=masks, - ) - - low_res_masks, iou_predictions = self.mask_decoder( - image_embeddings=embedding.unsqueeze(0), - image_pe=self.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=False, # when given multiple prompts. if there is single prompt True would be better. - ) + for idx, embedding in enumerate(image_embeddings): + low_res_masks, iou_predictions = [], [] + for idx_prompt, prompt in enumerate([bboxes[idx], points[idx]]): + if prompt is None: + continue + + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=(prompt.unsqueeze(1), torch.ones(len(prompt), 1, device=prompt.device)) if idx_prompt == 1 else None, + boxes=prompt if idx_prompt == 0 else None, + masks=None, + ) + + _low_res_masks, _iou_predictions = self.mask_decoder( + image_embeddings=embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=False, # when given multiple prompts. if there is single prompt True would be better. 
# noqa: E501 + ) + low_res_masks.append(_low_res_masks) + iou_predictions.append(_iou_predictions) - pred_masks.append(low_res_masks) - ious.append(iou_predictions) + pred_masks.append(torch.cat(low_res_masks, dim=0)) + ious.append(torch.cat(iou_predictions, dim=0)) return pred_masks, ious diff --git a/src/otx/algorithms/visual_prompting/configs/sam_tiny_vit/config.yaml b/src/otx/algorithms/visual_prompting/configs/sam_tiny_vit/config.yaml index 36e9748338b..f0dd50ca827 100644 --- a/src/otx/algorithms/visual_prompting/configs/sam_tiny_vit/config.yaml +++ b/src/otx/algorithms/visual_prompting/configs/sam_tiny_vit/config.yaml @@ -15,6 +15,8 @@ dataset: - 57.12 - 57.375 offset_bbox: 20 # randomness for generating bounding box, pixel + use_point: false + use_bbox: false model: name: SAM diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 7751713aa47..d550f1fb33d 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -149,11 +149,15 @@ def __init__( self.transform = get_transform() # TODO (sungchul): insert args def pre_process( - self, dataset_item: DatasetItemEntity, extra_processing: bool = False + self, dataset_item: DatasetItemEntity, extra_processing: bool = False, use_bbox: bool = False, use_point: bool = False, ) -> Tuple[Dict[str, Any], Dict[str, Any], List[Dict[str, Any]]]: """Pre-process function of OpenVINO Visual Prompting Inferencer for image encoder.""" + if use_bbox and use_point: + logger.warning("If both use_bbox and use_point are set, bboxes and points will be generated randomly.") + + prob = 1. if not use_point else 0. if not use_bbox and use_point else 0.5 images, meta = self.model["image_encoder"].preprocess(dataset_item.numpy, extra_processing) - prompts = OTXVisualPromptingDataset.get_prompts(dataset_item, self.labels) # to be replaced + prompts = OTXVisualPromptingDataset.get_prompts(dataset_item, self.labels, prob=prob) prompts = self.model["decoder"].preprocess(prompts, meta) return images, meta, prompts # type: ignore diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py index 2636627d2b9..dca279d5175 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py @@ -20,10 +20,6 @@ class TestResizeLongestSide: def setup(self): self.resize_longest_side = ResizeLongestSide(8) - @e2e_pytest_unit - def test_call(self): - """Test __call__.""" - @e2e_pytest_unit @pytest.mark.parametrize( "image,expected", diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py index a703c7512c6..c9e32b6f28e 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py @@ -4,7 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 # -from typing import Any, Dict, Tuple +from typing import Any, Dict, Tuple, List import pytest import torch @@ -21,53 +21,97 @@ @e2e_pytest_unit -def 
test_collate_fn(): - """Test collate_fn.""" - batch = [ +@pytest.mark.parametrize("batch,expected",[ + ( + [ + { + "index": 0, + "images": Tensor([1, 2, 3]), + "bboxes": Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), + "points": torch.zeros((0, 2)), + "gt_masks": [Tensor([1, 2, 3])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + { + "index": 1, + "images": Tensor([4, 5, 6]), + "bboxes": Tensor([[9, 10, 11, 12]]), + "points": torch.zeros((0, 2)), + "gt_masks": [Tensor([4, 5, 6])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + ], { - "index": 0, - "images": Tensor([1, 2, 3]), - "bboxes": np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), - "points": [], - "gt_masks": [Tensor([1, 2, 3])], - "original_size": np.array([1, 3]), - "path": [], - "labels": [], - }, + "index": [0, 1], + "images": Tensor([[1, 2, 3], [4, 5, 6]]), + "bboxes": [Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), Tensor([[9, 10, 11, 12]])], + "points": [None, None], + "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], + "original_size": [Tensor([1, 3]), Tensor([1, 3])], + "path": [[], []], + "labels": [[], []], + } + ), + ( + [ + { + "index": 0, + "images": Tensor([1, 2, 3]), + "bboxes": torch.zeros((0, 4)), + "points": Tensor([[1, 1]]), + "gt_masks": [Tensor([1, 2, 3])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + { + "index": 1, + "images": Tensor([4, 5, 6]), + "bboxes": torch.zeros((0, 4)), + "points": Tensor([[2, 2]]), + "gt_masks": [Tensor([4, 5, 6])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + ], { - "index": 1, - "images": Tensor([4, 5, 6]), - "bboxes": np.array([[9, 10, 11, 12]]), - "points": [], - "gt_masks": [Tensor([4, 5, 6])], - "original_size": np.array([1, 3]), - "path": [], - "labels": [], - }, - ] - expected = { - "index": [0, 1], - "images": Tensor([[1, 2, 3], [4, 5, 6]]), - "bboxes": [Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), Tensor([[9, 10, 11, 12]])], - "points": None, - "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], - "original_size": [Tensor([1, 3]), Tensor([1, 3])], - "path": [[], []], - "labels": [[], []], - } - + "index": [0, 1], + "images": Tensor([[1, 2, 3], [4, 5, 6]]), + "bboxes": [None, None], + "points": [Tensor([[1, 1]]), Tensor([[2, 2]])], + "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], + "original_size": [Tensor([1, 3]), Tensor([1, 3])], + "path": [[], []], + "labels": [[], []], + } + ) +]) +def test_collate_fn(batch: List[Dict[str, Any]], expected: Dict[str, Any]): + """Test collate_fn.""" results = collate_fn(batch) assert results["index"] == expected["index"] assert torch.all(results["images"] == expected["images"]) for r, e in zip(results["bboxes"], expected["bboxes"]): - assert torch.all(r == e) - assert results["points"] == expected["points"] + if r is not None and e is not None: + assert torch.all(r == e) + + for r, e in zip(results["points"], expected["points"]): + if r is not None and e is not None: + assert torch.all(r == e) + assert len(results["gt_masks"]) == len(expected["gt_masks"]) for r, e in zip(results["gt_masks"], expected["gt_masks"]): assert torch.all(r == e) + for r, e in zip(results["original_size"], expected["original_size"]): assert torch.all(r == e) + assert results["path"] == expected["path"] assert results["labels"] == expected["labels"] diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py index 99a76c3b17b..6e5211c1899 
100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py @@ -19,7 +19,6 @@ generate_bbox, generate_bbox_from_mask, get_transform, - generate_point_from_mask, ) from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( MultipleInputsCompose, @@ -146,11 +145,6 @@ def test_generate_bbox_from_mask(mocker) -> None: assert bbox[3] >= 0 and bbox[3] <= height -@e2e_pytest_unit -def test_generate_point_from_mask() -> None: - """TODO""" - - class TestOTXVIsualPromptingDataset: @e2e_pytest_unit def test_len(self, mocker, dataset_polygon, transform, image_size, mean, std) -> None: @@ -159,7 +153,7 @@ def test_len(self, mocker, dataset_polygon, transform, image_size, mean, std) -> "otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset.get_transform", return_value=transform, ) - otx_dataset = OTXVisualPromptingDataset(dataset_polygon, image_size, mean, std) + otx_dataset = OTXVisualPromptingDataset("testing", dataset_polygon, image_size, mean, std) assert len(otx_dataset) == 4 @e2e_pytest_unit @@ -173,7 +167,7 @@ def test_getitem( return_value=transform, ) dataset = dataset_mask if use_mask else dataset_polygon - otx_dataset = OTXVisualPromptingDataset(dataset, image_size, mean, std) + otx_dataset = OTXVisualPromptingDataset("testing", dataset, image_size, mean, std) item = otx_dataset[0] @@ -189,7 +183,7 @@ def test_getitem( assert isinstance(item["gt_masks"], list) assert isinstance(item["gt_masks"][0], np.ndarray) assert isinstance(item["bboxes"], np.ndarray) - assert item["points"] == [] + assert len(item["points"]) == 0 class TestOTXZeroShotVisualPromptingDataset: @@ -209,7 +203,7 @@ def test_getitem( return_value=transform, ) dataset = dataset_mask if use_mask else dataset_polygon - otx_dataset = OTXZeroShotVisualPromptingDataset(dataset, image_size, mean, std) + otx_dataset = OTXZeroShotVisualPromptingDataset("testing", dataset, image_size, mean, std) item = otx_dataset[0] @@ -225,7 +219,7 @@ def test_getitem( assert isinstance(item["gt_masks"], list) assert isinstance(item["gt_masks"][0], np.ndarray) assert isinstance(item["bboxes"], np.ndarray) - assert item["points"] == [] + assert len(item["points"]) == 0 class TestOTXVisualPromptingDataModule: @@ -248,8 +242,8 @@ def test_init_zeroshot(self, set_datamodule): datamodule = set_datamodule(train_type=TrainType.Zeroshot) assert datamodule.config.get("train_batch_size") == 1 - assert "generate_point" in datamodule.kwargs - assert "generate_bbox" in datamodule.kwargs + assert "use_point" in datamodule.kwargs + assert "use_bbox" in datamodule.kwargs @e2e_pytest_unit def test_setup(self, mocker, set_datamodule) -> None: diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index f5dce1d8396..24ae52aa3cb 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -351,7 +351,7 @@ def test_forward_train(self) -> None: images = torch.zeros((1, 3, 4, 4)) bboxes = torch.zeros((1)) - results = sam.forward_train(images=images, bboxes=bboxes, points=None) + results = 
sam.forward_train(images=images, bboxes=bboxes, points=[None]) pred_masks, ious = results assert len(bboxes) == len(pred_masks) == len(ious) @@ -378,7 +378,7 @@ def test_training_step(self, mocker, loss_type: str, expected: Tensor) -> None: images=torch.ones((1, 3, 4, 4)), gt_masks=[torch.Tensor([[0, 1, 1, 0] for _ in range(4)]).to(torch.int32)], bboxes=torch.Tensor([[0, 0, 1, 1]]), - points=[], + points=[None], padding=[[0, 0, 0, 0]], original_size=[[4, 4]], ) @@ -420,7 +420,7 @@ def test_validation_step(self, mocker) -> None: images=torch.ones((1, 3, 4, 4)), gt_masks=[torch.Tensor([[0, 1, 1, 0] for _ in range(4)]).to(torch.int32)], bboxes=torch.Tensor([[0, 0, 1, 1]]), - points=[], + points=[None], path=None, labels=None, padding=[0], @@ -471,7 +471,7 @@ def test_predict_step(self, mocker, return_logits: bool, expected: Tensor) -> No batch = dict( images=torch.zeros((1, 3, 4, 4)), bboxes=torch.Tensor([[0, 0, 1, 1]]), - points=[], + points=[None], path=None, labels=None, padding=[0], From b8d89e42a5b9806da15c598a0bb051bcdece176f Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Fri, 16 Feb 2024 00:23:16 +0900 Subject: [PATCH 09/28] (WIP) Update zsl pipeline --- .../pytorch_lightning/callbacks/inference.py | 2 +- .../pytorch_lightning/datasets/dataset.py | 10 +- .../datasets/pipelines/sam_transforms.py | 6 +- .../zero_shot_segment_anything.py | 354 ++++++++++-------- .../zero_shot_sam_tiny_vit/config.yaml | 5 +- .../visual_prompting/tasks/inference.py | 2 + .../model_wrappers/test_openvino_models.py | 37 +- .../callbacks/test_inference_callback.py | 6 +- .../test_zero_shot_segment_anything.py | 78 ++-- .../visual_prompting/tasks/test_inference.py | 4 +- 10 files changed, 289 insertions(+), 215 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py index 16960cb32ad..df751eeaba5 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py @@ -57,7 +57,7 @@ def on_predict_epoch_end(self, _trainer: Trainer, _pl_module: LightningModule, o for output in outputs[0]: pred_masks.append(output["masks"][0]) iou_predictions.append(output["iou_predictions"][0]) - pred_labels.append(output["labels"][0]["bboxes"] + output["labels"][0]["points"]) + pred_labels.append(output["labels"][0].get("bboxes", []) + output["labels"][0].get("points", [])) for dataset_item, pred_mask, iou_prediction, labels in zip( self.otx_dataset, pred_masks, iou_predictions, pred_labels diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py index 82dead10c87..83ada16d9c6 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py @@ -286,7 +286,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: prompts = self.get_prompts(dataset_item, self.labels, self.prob) item.update({**prompts, "path": dataset_item.media.path}) - item = self.transform(item) + return item @@ -413,7 +413,7 @@ def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, Data shuffle=True, batch_size=self.config.train_batch_size, num_workers=self.config.num_workers, - collate_fn=collate_fn, + 
collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, ) def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]: @@ -427,7 +427,7 @@ def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]: shuffle=False, batch_size=self.config.val_batch_size, num_workers=self.config.num_workers, - collate_fn=collate_fn, + collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, ) def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]: @@ -441,7 +441,7 @@ def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]: shuffle=False, batch_size=self.config.test_batch_size, num_workers=self.config.num_workers, - collate_fn=collate_fn, + collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, ) def predict_dataloader(self) -> Union[DataLoader, List[DataLoader]]: @@ -455,5 +455,5 @@ def predict_dataloader(self) -> Union[DataLoader, List[DataLoader]]: shuffle=False, batch_size=1, num_workers=self.config.num_workers, - collate_fn=collate_fn, + collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, ) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py index 33c1fbb9b59..a47efc97540 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py @@ -37,8 +37,10 @@ def __call__(self, item: Dict[str, Union[List, Tensor]]) -> Dict[str, Union[List self.apply_image(item["images"], self.target_length).transpose((2, 0, 1)), dtype=torch.get_default_dtype() ) item["gt_masks"] = [torch.as_tensor(gt_mask) for gt_mask in item["gt_masks"]] - item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"], self.target_length) - item["points"] = self.apply_coords(item["points"], item["original_size"], self.target_length) + if "bboxes" in item: + item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"], self.target_length) + if "points" in item: + item["points"] = self.apply_coords(item["points"], item["original_size"], self.target_length) return item @classmethod diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index d85a33b5359..ed902e2c4be 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -5,23 +5,26 @@ from collections import OrderedDict, defaultdict import os +import json from copy import deepcopy from itertools import product +import ast from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union import torch +import numpy as np +import cv2 from omegaconf import DictConfig from torch import nn, Tensor from torch.nn import Parameter, ParameterDict from torch.nn import functional as F -from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( - ResizeLongestSide, -) from otx.api.entities.scored_label import ScoredLabel from otx.utils.logger import get_logger +from 
otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset import get_transform from .segment_anything import SegmentAnything +from datetime import datetime logger = get_logger() @@ -55,6 +58,7 @@ def forward( ) -> Tuple[Tensor, Tensor]: """Get prompt candidates.""" device = image_embedding.device + original_size = original_size.squeeze() threshold = threshold.squeeze().to(device) num_bg_points = num_bg_points.squeeze() @@ -92,8 +96,6 @@ def get_prompt_candidates( device: Union[torch.device, str] = torch.device("cpu"), ) -> Tuple[Tensor, Tensor]: """Get prompt candidates from given reference and target features.""" - assert original_size.dim() == 2 - target_feat = image_embedding.squeeze() c_feat, h_feat, w_feat = target_feat.shape target_feat = target_feat / target_feat.norm(dim=0, keepdim=True) @@ -101,12 +103,12 @@ def get_prompt_candidates( sim = reference_feat.to(device) @ target_feat sim = sim.reshape(1, 1, h_feat, w_feat) - sim = ZeroShotSegmentAnything.postprocess_masks(sim, self.image_size, original_size[0]) + sim = ZeroShotSegmentAnything.postprocess_masks(sim, self.image_size, original_size) threshold = (threshold == 0) * self.default_threshold_target + threshold points_scores, bg_coords = self._point_selection( mask_sim=sim[0, 0], - original_size=original_size[0], + original_size=original_size, threshold=threshold, num_bg_points=num_bg_points, ) @@ -167,9 +169,13 @@ def _point_selection( class ZeroShotSegmentAnything(SegmentAnything): """Zero-shot learning module using Segment Anything.""" - def __init__(self, config: Optional[DictConfig] = None, state_dict: Optional[OrderedDict] = None) -> None: + def __init__(self, config: Optional[DictConfig] = None, manual_config_update: Optional[Dict] = None, state_dict: Optional[OrderedDict] = None) -> None: if config is None: config = self.set_default_config() + + if manual_config_update is not None and isinstance(manual_config_update, dict) and len(manual_config_update) > 0: + for k, v in manual_config_update.items(): + exec(f"config.{k} = {v}") # check freeze conditions for condition in ["freeze_image_encoder", "freeze_prompt_encoder", "freeze_mask_decoder"]: @@ -177,10 +183,9 @@ def __init__(self, config: Optional[DictConfig] = None, state_dict: Optional[Ord logger.warning(f"config.model.{condition}(=False) must be set to True, changed.") setattr(config.model, condition, True) - super().__init__(config, None) + super().__init__(config, state_dict) + self.set_empty_reference_info() - self._register_load_state_dict_pre_hook(self.load_state_dict_pre_hook) - self.load_checkpoint(state_dict) self.prompt_getter = PromptGetter(image_size=config.model.image_size) self.prompt_getter.set_default_thresholds( @@ -191,6 +196,13 @@ def __init__(self, config: Optional[DictConfig] = None, state_dict: Optional[Ord self.point_labels_box = torch.as_tensor([[2, 3]], dtype=torch.float32) self.has_mask_inputs = [torch.as_tensor([[0.0]]), torch.as_tensor([[1.0]])] + self.transforms = get_transform( + image_size=config.model.image_size, + mean=config.dataset.normalize.mean, + std=config.dataset.normalize.std) + + self.path_reference_info = "vpm_zsl_reference_infos/{}/reference_info.pt" + def load_state_dict_pre_hook(self, state_dict: dict[str, Any], prefix: str = "", *args, **kwargs) -> None: """Load reference info manually.""" _reference_feats: Tensor = state_dict.get("reference_info.reference_feats", torch.as_tensor([], dtype=torch.float32)) @@ -216,6 +228,12 @@ def set_default_config(self) -> DictConfig: "freeze_prompt_encoder": True, 
"image_size": 1024, "mask_threshold": 0.0, + }, + "dataset": { + "normalize": { + "mean": [123.675, 116.28, 103.53], + "std": [58.395, 57.12, 57.375], + } } } ) @@ -232,9 +250,9 @@ def set_empty_reference_info(self) -> None: ) self.is_reference_info_empty = True - def initialize_reference_info(self, largest_label: int) -> None: + def initialize_reference_info(self) -> None: """Initialize reference information.""" - self.reference_info["reference_feats"] = Parameter(torch.zeros(largest_label + 1, 1, 256), requires_grad=False) + self.reference_info["reference_feats"] = Parameter(torch.zeros(0, 1, 256), requires_grad=False) self.reference_info["used_indices"] = Parameter(torch.as_tensor([[]], dtype=torch.int64), requires_grad=False) self.is_reference_info_empty = False @@ -242,20 +260,11 @@ def expand_reference_info(self, new_largest_label: int) -> None: """Expand reference info dimensions if newly given processed prompts have more lables.""" if new_largest_label > (cur_largest_label := len(self.reference_info["reference_feats"]) - 1): diff = new_largest_label - cur_largest_label - self.reference_info["reference_feats"] = F.pad( - self.reference_info["reference_feats"], - (0, 0, 0, 0, 0, diff), - value=0.0, - ) + padded_reference_feats = F.pad(self.reference_info["reference_feats"], (0, 0, 0, 0, 0, diff), value=0.0) + self.reference_info["reference_feats"] = Parameter(padded_reference_feats, requires_grad=False) @torch.no_grad() - def learn( - self, - images: Tensor, - processed_prompts: Dict[ScoredLabel, List[Dict[str, Tensor]]], - original_size: Tensor, - return_outputs: bool = False, - ) -> None: + def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[None, Tuple[ParameterDict, Tensor]]: """Get reference features. Using given images, get reference features and save it to PromptGetter. @@ -263,86 +272,102 @@ def learn( Currently, single batch is only supported. Args: - images (Tensor): Given images for reference features. - processed_prompts (Dict[ScoredLabel, List[Dict[str, Tensor]]]): The whole class-wise prompts - processed at _preprocess_prompts. - original_size (Tensor): Original image size. + batch (List[Dict[str, Any]]): List of dictionaries containing images, prompts, and metas. + `batch` must contain images, prompts with bboxes, points, annotations, and polygons. + reset_feat (bool): Whether reset reference_info. + For OTX standalone, resetting reference_info will be conducted in on_train_start. + For other frameworks, setting it to True is required to reset reference_info. Defaults to False. + + Returns: + (Tuple[ParameterDict, Tensor]): reference_info and ref_masks. """ - assert images.shape[0] == 1, "Only single batch is supported." 
+ if reset_feat: + self.initialize_reference_info() + + # preprocess images and prompts + transformed_batch = [self.transforms(b.copy()) for b in batch] + processed_prompts = [self._preprocess_prompts(tb) for tb in transformed_batch] # initialize tensors to contain reference features and prompts - largest_label = max([int(label.id) for label in processed_prompts.keys()]) - if self.is_reference_info_empty: - self.initialize_reference_info(largest_label) - else: - self.expand_reference_info(largest_label) - # TODO(sungchul): consider who to handle multiple reference features, currently replace it # noqa: TD003 - - image_embeddings = self.image_encoder(images) - processed_embedding = image_embeddings.squeeze().permute(1, 2, 0) + largest_label = max([int(label.id) for pp in processed_prompts for label in pp.keys()]) + self.expand_reference_info(largest_label) + # TODO(sungchul): consider who to handle multiple reference features, currently replace it + + batch_ref_masks: List[Tensor] = [] + for tb, pp in zip(transformed_batch, processed_prompts): + # assign components + images = tb["images"].unsqueeze(0).to(self.device) + original_size = torch.as_tensor(tb["original_size"]) - ref_masks = torch.zeros(largest_label + 1, *map(int, original_size)) - for label, input_prompts in processed_prompts.items(): - if label.name.lower() == "background": - # skip background - # TODO (sungchul): how to skip background class - continue - - # generate reference mask - # TODO (sungchul): ensemble multi reference features (current : use merged masks) - ref_mask = torch.zeros(*map(int, original_size), dtype=torch.uint8, device=self.device) - for input_prompt in input_prompts: - if (anno_prompt := input_prompt.get("annotation", None)) is not None: - # directly use annotation information as a mask - ref_mask[anno_prompt == 1] += 1 - else: - merged_input_prompts: Dict[str, Tensor] = self._merge_prompts(label, input_prompt, processed_prompts) - # TODO (sungchul): they must be processed in `_merge_prompts` - # and it is required to be expanded to other prompts. 
- point_coords = [] - point_labels = [] - if (input_prompt := merged_input_prompts.get("box", None)) is not None: - point_coords = input_prompt.reshape(1, 2, 2) - point_labels = torch.as_tensor([[2, 3]], device=self.device) - - if (input_prompt := merged_input_prompts.get("points", None)) is not None: - point_coords = input_prompt.reshape(1, 1, 2) - point_labels = torch.as_tensor([[1]], device=self.device) - - if (input_prompt := merged_input_prompts.get("annotations", None)) is not None: - raise NotImplementedError() - - masks = self._predict_masks( - image_embeddings=image_embeddings, - point_coords=point_coords, - point_labels=point_labels, - original_size=original_size.unsqueeze(0), - is_cascade=False, - ) - ref_mask[masks] += 1 - ref_mask = torch.clip(ref_mask, 0, 1).to(torch.float32) - - ref_feat = None - default_threshold_reference = deepcopy(self.prompt_getter.default_threshold_reference) - while ref_feat is None: - logger.info(f"[*] default_threshold_reference : {default_threshold_reference:.4f}") - ref_feat = self._generate_masked_features(processed_embedding, ref_mask, default_threshold_reference) - default_threshold_reference -= 0.05 - - self.reference_info["reference_feats"][int(label.id)] = ref_feat.detach().cpu() - self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([[int(label.id)]])), dim=1), requires_grad=False) - ref_masks[int(label.id)] = ref_mask.detach().cpu() - - if return_outputs: - return self.reference_info, ref_masks + image_embeddings = self.image_encoder(images) + processed_embedding = image_embeddings.squeeze().permute(1, 2, 0) + + ref_masks = torch.zeros(largest_label + 1, *map(int, original_size)) + for label, input_prompts in pp.items(): + if label.name.lower() == "background": + # skip background + # TODO (sungchul): how to skip background class + continue + + # generate reference mask + # TODO (sungchul): ensemble multi reference features (current : use merged masks) + ref_mask = torch.zeros(*map(int, original_size), dtype=torch.uint8, device=self.device) + for input_prompt in input_prompts: + if (prompt := input_prompt.get("annotations", None)) is not None: + # directly use annotation information as a mask + ref_mask[prompt == 1] += 1 + elif (prompt := input_prompt.get("polygons", None)) is not None: + for polygon in prompt["polygons"]: + contour = [[int(point[0]), int(point[1])] for point in polygon] + mask_from_polygon = np.zeros(original_size, dtype=np.uint8) + mask_from_polygon = cv2.drawContours(mask_from_polygon, np.asarray([contour]), 0, 1, -1) + ref_mask[mask_from_polygon == 1] += 1 + elif (prompt := input_prompt.get("scribble_annotation", None)) is not None: + logger.warning(f"scribble_annotation is not supported yet.") + continue + elif (prompt := input_prompt.get("scribble_polygon", None)) is not None: + logger.warning(f"scribble_polygon is not supported yet.") + continue + else: + point_coords = [] + point_labels = [] + if (prompt := input_prompt.get("bboxes", None)) is not None: + point_coords = prompt["point_coords"].reshape(1, 2, 2) + + elif (prompt := input_prompt.get("points", None)) is not None: + point_coords = prompt["point_coords"].reshape(1, 1, 2) + + point_labels = prompt["point_labels"] + + masks = self._predict_masks( + image_embeddings=image_embeddings, + point_coords=point_coords, + point_labels=point_labels, + original_size=original_size, + is_cascade=False, + ) + ref_mask[masks] += 1 + ref_mask = torch.clip(ref_mask, 0, 1).to(torch.float32) + + ref_feat = None + 
default_threshold_reference = deepcopy(self.prompt_getter.default_threshold_reference) + while ref_feat is None: + logger.info(f"[*] default_threshold_reference : {default_threshold_reference:.4f}") + ref_feat = self._generate_masked_features(processed_embedding, ref_mask, default_threshold_reference) + default_threshold_reference -= 0.05 + + self.reference_info["reference_feats"][int(label.id)] = ref_feat.detach().cpu() + self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([[int(label.id)]])), dim=1), requires_grad=False) + ref_masks[int(label.id)] = ref_mask.detach().cpu() + batch_ref_masks.append(ref_masks) + return self.reference_info, batch_ref_masks @torch.no_grad() def infer( - self, - images: Tensor, + self, + batch: List[Dict[str, Any]], reference_feats: Tensor, used_indices: Tensor, - original_size: Tensor, is_cascade: bool = False, ) -> List[List[DefaultDict[int, List[Tensor]]]]: """Zero-shot inference with reference features. @@ -350,10 +375,9 @@ def infer( Get target results by using reference features and target images' features. Args: - images (Tensor): Given images for target results. + batch (List[Dict[str, Any]]): List of dictionaries containing images and metas. reference_feats (Tensor): Reference features for target prediction. used_indices (Tensor): To check which indices of reference features are validate. - original_size (Tensor): Original image size. is_cascade (bool): Whether use cascade inference. Defaults to False. Returns: @@ -362,21 +386,21 @@ def infer( 1. Target images 2. Tuple of predicted masks and used points gotten by point selection """ - assert images.shape[0] == 1, "Only single batch is supported." - - total_results = [] - for image in images: - if image.ndim == 3: - image = image.unsqueeze(0) - + # preprocess images and prompts + transformed_batch = [self.transforms(b.copy()) for b in batch] + + total_results: List[List[Tensor]] = [] + for tb in transformed_batch: + # assign components + images = tb["images"].unsqueeze(0).to(self.device) + original_size = torch.as_tensor(tb["original_size"]) + image_embedding = self.image_encoder(images) - total_points_scores, total_bg_coords = self.prompt_getter( image_embedding=image_embedding, reference_feats=reference_feats, used_indices=used_indices, - original_size=original_size - ) + original_size=original_size) predicted_masks: defaultdict = defaultdict(list) used_points: defaultdict = defaultdict(list) for label, (points_scores, bg_coords) in enumerate(zip(total_points_scores, total_bg_coords)): @@ -394,7 +418,7 @@ def infer( continue point_coords = torch.cat((points_score[:2].unsqueeze(0), bg_coords), dim=0).unsqueeze(0) - point_coords = self._preprocess_coords(point_coords, original_size[0], self.config.model.image_size) + point_coords = self._preprocess_coords(point_coords, original_size, self.config.model.image_size) point_labels = torch.as_tensor( [1] + [0] * len(bg_coords), dtype=torch.float32, device=self.device ).unsqueeze(0) @@ -418,7 +442,7 @@ def _inspect_overlapping_areas( predicted_masks: Dict[int, List[Tensor]], used_points: Dict[int, List[Tensor]], threshold_iou: float = 0.8, - ): + ) -> None: def _calculate_mask_iou(mask1: Tensor, mask2: Tensor): assert mask1.ndim == 2 and mask2.ndim == 2 intersection = torch.logical_and(mask1, mask2).sum().item() @@ -487,7 +511,7 @@ def _predict_masks( y, x = coords[:, 0], coords[:, 1] box_coords = self._preprocess_coords( torch.as_tensor([[[x.min(), y.min()], [x.max(), y.max()]]], 
dtype=torch.float32, device=self.device), - original_size[0], + original_size, self.config.model.image_size) point_coords = torch.cat((point_coords, box_coords), dim=1) point_labels = torch.cat((point_labels, self.point_labels_box.to(self.device)), dim=1) @@ -498,7 +522,7 @@ def _predict_masks( point_labels=point_labels, mask_input=mask_input, has_mask_input=has_mask_input, - orig_size=original_size, + orig_size=original_size.unsqueeze(0), ) masks = high_res_masks > self.config.model.mask_threshold _, masks = self._postprocess_masks(masks, logits, scores) @@ -506,48 +530,20 @@ def _predict_masks( def training_step(self, batch, batch_idx) -> None: """Training step for `learn`.""" - # TODO (sungchul): each prompt will be assigned with each label - bboxes = batch["bboxes"] - labels = batch["labels"] - # TODO (sungchul): support other below prompts - # points = batch["points"] - # annotations = batch["annotations"] - - # organize prompts based on label - processed_prompts = self._preprocess_prompts(bboxes=bboxes[0], labels=labels[0]) - - self.learn( - images=batch["images"], - processed_prompts=processed_prompts, - original_size=batch.get("original_size")[0], - ) + self.learn(batch) def predict_step(self, batch, batch_idx): """Predict step for `infer`.""" - results = self.infer( - images=batch["images"], - reference_feats=self.reference_info["reference_feats"], - used_indices=self.reference_info["used_indices"], - original_size=batch.get("original_size")[0].unsqueeze(0)) + results = self.infer(batch, self.reference_info["reference_feats"], self.reference_info["used_indices"]) return [result[0] for result in results] # tmp: only mask - def _preprocess_prompts( - self, - bboxes: Optional[Tensor] = None, - points: Optional[Tensor] = None, - annotations: Optional[Tensor] = None, - labels: Optional[Tensor] = None, - ) -> Dict[ScoredLabel, List[Dict[str, Tensor]]]: + def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[ScoredLabel, List[Dict[str, Tensor]]]: """Preprocess prompts. Currently, preprocessing for bounding boxes is only supported. Args: - bboxes (Tensor, optional): Bounding box prompts to be preprocessed. - points (Tensor, optional): Point prompts to be preprocessed, to be supported. - annotations (Tensor, optional): annotation prompts to be preprocessed, to be supported. - labels (Tensor, optional): Assigned labels according to given prompts. - Currently, it is only matched to bboxes, and it will be deprecated. + batch (Dict[str, Any]): Dictionary containing data and prompts information. Returns: (defaultdict[ScoredLabel, List[Dict[str, Tensor]]]): Processed and arranged each single prompt @@ -555,18 +551,37 @@ def _preprocess_prompts( as single annotation. 
""" processed_prompts = defaultdict(list) - # TODO (sungchul): will be updated - if bboxes is not None: - for bbox, label in zip(bboxes, labels): - processed_prompts[label].append({"box": bbox.reshape(-1, 4)}) - - if points: - pass - - if annotations: - pass - - processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x[0].id_)) # type: ignore[assignment] + for prompt_name in ["annotations", "polygons", "bboxes", "points"]: + prompts = batch.get(prompt_name, None) + labels = batch["labels"].get(prompt_name, None) + if prompts is None or len(prompts) == 0: + continue + for prompt, label in zip(prompts, labels): + # TODO (sungchul): revisit annotations and polygons + if prompt_name == "annotations": + processed_prompts[label].append({prompt_name: torch.as_tensor(prompt, device=self.device)}) + elif prompt_name == "polygons": + masks = [] + for polygon in prompt: + contour = [[int(point[0]), int(point[1])] for point in polygon] + mask_from_polygon = np.zeros(batch["original_size"], dtype=np.uint8) + mask_from_polygon = cv2.drawContours(mask_from_polygon, np.asarray([contour]), 0, 1, -1) + masks.append(mask_from_polygon) + processed_prompts[label].append({prompt_name: torch.tensor(prompt, device=self.device)}) + elif prompt_name == "bboxes": + processed_prompts[label].append({ + prompt_name: { + "point_coords": torch.as_tensor(prompt.reshape(-1, 2, 2), device=self.device), + "point_labels": torch.tensor([[1]], device=self.device), + }}) + elif prompt_name == "points": + processed_prompts[label].append({ + prompt_name: { + "point_coords": torch.as_tensor(prompt.reshape(-1, 2), device=self.device), + "point_labels": torch.tensor([[1]], device=self.device), + }}) + + processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x[0].id_)) return processed_prompts def _preprocess_coords( @@ -730,7 +745,30 @@ def set_metrics(self) -> None: def configure_optimizers(self) -> None: """Skip configure_optimizers unused in zero-shot learning.""" pass + + def _find_latest_reference_info(self) -> Union[str, None]: + """Find latest reference info to be used.""" + if len(stamps := sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)) > 0: + return stamps[0] + self.initialize_reference_info() + + def on_train_start(self) -> None: + """Called at the beginning of training after sanity check.""" + self.initialize_reference_info() + + def on_predict_start(self) -> None: + """Called at the beginning of predicting.""" + if (latest_stamp := self._find_latest_reference_info()) is not None: + latest_reference_info = self.path_reference_info.format(latest_stamp) + self.reference_info = torch.load(latest_reference_info) + self.reference_info.to(self.device) + logger.info(f"reference info saved at {latest_reference_info} was successfully loaded.") def training_epoch_end(self, outputs) -> None: - """Skip training_epoch_end unused in zero-shot learning.""" - pass + """Called in the training loop at the very end of the epoch.""" + if self.config.model.save_outputs: + path_reference_info = self.path_reference_info.format(datetime.now().strftime("%Y%m%d-%H%M%S")) + os.makedirs(os.path.dirname(path_reference_info), exist_ok=True) + torch.save(self.reference_info, path_reference_info) + json.dump(repr(self.trainer.datamodule.train_dataset.dataset), open(path_reference_info.replace("reference_info.pt", "reference_meta.json"), "w")) + logger.info(f"Saved reference info at {path_reference_info}") diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml 
b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml index 097390fba0f..a50ea244fbd 100644 --- a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml @@ -15,8 +15,8 @@ dataset: - 57.12 - 57.375 offset_bbox: 0 - generate_point: false - generate_bbox: false + use_point: false + use_bbox: false model: name: SAM @@ -36,6 +36,7 @@ model: # zero-shot default_threshold_reference: 0.3 default_threshold_target: 0.65 + save_outputs: True # PL Trainer Args. Don't add extra parameter here. trainer: diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index 5e8e6945701..f5033bfb99a 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -710,6 +710,8 @@ def save_model(self, output_model: ModelEntity) -> None: logger.info("Saving the model weights and reference features.") model_info = self.model.state_dict() + model_info.pop("reference_info.reference_feats") + model_info.pop("reference_info.used_indices") buffer = io.BytesIO() torch.save(model_info, buffer) diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index 9b5093b004a..e6ba00166d0 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -4,7 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 # -from typing import Tuple +from typing import Tuple, Dict, Any import numpy as np import pytest @@ -82,17 +82,42 @@ def test_get_outputs(self): assert "upscaled_masks" == results @e2e_pytest_unit - def test_preprocess(self): + @pytest.mark.parametrize("prompts,expected", + [ + ( + { + "bboxes": [np.array([[1, 1], [2, 2]])], + "points": [], + "labels": {"bboxes": [1]}, + "original_size": (4, 4) + }, + { + "point_coords": (1, 2, 2), + "point_labels": (1, 2), + } + ), + ( + { + "bboxes": [], + "points": [np.array([[1, 1]])], + "labels": {"points": [1]}, + "original_size": (4, 4) + }, + { + "point_coords": (1, 1, 2), + "point_labels": (1, 1), + } + ) + ]) + def test_preprocess(self, prompts: Dict[str, Any], expected: Dict[str, Any]): """Test preprocess""" - prompts = {"bboxes": [np.array([[1, 1], [2, 2]])], "labels": [1], "original_size": (4, 4)} - results = self.decoder.preprocess(prompts, {}) assert isinstance(results, list) assert "point_coords" in results[0] - assert results[0]["point_coords"].shape == (1, 2, 2) + assert results[0]["point_coords"].shape == expected["point_coords"] assert "point_labels" in results[0] - assert results[0]["point_labels"].shape == (1, 2) + assert results[0]["point_labels"].shape == expected["point_labels"] assert "mask_input" in results[0] assert "has_mask_input" in results[0] assert "orig_size" in results[0] diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py index 3cb1710d76e..0bac7032fa1 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py +++ 
b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py @@ -77,9 +77,9 @@ def test_on_predict_epoch_end(self, use_mask: bool, expected: Any): { "masks": [torch.Tensor([[[0, 1, 0], [1, 1, 1], [0, 1, 0]]])], "iou_predictions": [torch.Tensor([[0.9]])], - "labels": [ - [ScoredLabel(label=LabelEntity("foreground", domain=Domain.VISUAL_PROMPTING), probability=0.0)], - ], + "labels": [{ + "bboxes": [ScoredLabel(label=LabelEntity("foreground", domain=Domain.VISUAL_PROMPTING), probability=0.0)], + }], } ] ] diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 58fa1554ed7..cba6b616aa3 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -9,6 +9,7 @@ from collections import OrderedDict from tests.test_suite.e2e_test_system import e2e_pytest_unit import torch +import numpy as np from torch import nn from omegaconf import DictConfig @@ -105,7 +106,7 @@ def test_point_selection(self, mask_sim: torch.Tensor, expected: torch.Tensor) - class TestZeroShotSegmentAnything: @pytest.fixture def set_zero_shot_segment_anything(self, monkeypatch): - def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): + def zero_shot_segment_anything(manual_config_update: Optional[Dict] = None, state_dict: Optional[OrderedDict] = None): monkeypatch.setattr( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SAMImageEncoder", MockImageEncoder, @@ -114,7 +115,7 @@ def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SAMMaskDecoder", MockMaskDecoder, ) - return ZeroShotSegmentAnything(state_dict=state_dict) + return ZeroShotSegmentAnything(manual_config_update=manual_config_update, state_dict=state_dict) return zero_shot_segment_anything @@ -123,18 +124,15 @@ def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): "state_dict", [ None, - { - "reference_info.reference_feats": torch.zeros(1), - "reference_info.used_indices": torch.zeros(1, dtype=torch.int64), - }, + {}, ], ) def test_init(self, set_zero_shot_segment_anything, state_dict: Optional[Dict[str, Any]]) -> None: """Test __init__.""" if state_dict is not None: - zero_shot_segment_anything_for_init_weights = set_zero_shot_segment_anything().state_dict() - zero_shot_segment_anything_for_init_weights.update(state_dict) - state_dict = zero_shot_segment_anything_for_init_weights + state_dict = set_zero_shot_segment_anything().state_dict() + state_dict.pop("reference_info.reference_feats") + state_dict.pop("reference_info.used_indices") zero_shot_segment_anything = set_zero_shot_segment_anything(state_dict=state_dict) @@ -143,8 +141,8 @@ def test_init(self, set_zero_shot_segment_anything, state_dict: Optional[Dict[st assert zero_shot_segment_anything.config.model.freeze_mask_decoder if state_dict: - assert zero_shot_segment_anything.reference_info.reference_feats == torch.zeros(1) - assert zero_shot_segment_anything.reference_info.used_indices == torch.zeros(1, dtype=torch.int64) + assert 
zero_shot_segment_anything.reference_info.reference_feats is not None + assert zero_shot_segment_anything.reference_info.used_indices is not None assert zero_shot_segment_anything.reference_info.reference_feats.dtype == torch.float32 assert zero_shot_segment_anything.reference_info.used_indices.dtype == torch.int64 @@ -171,7 +169,7 @@ def test_set_default_config(self, set_zero_shot_segment_anything) -> None: @e2e_pytest_unit def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: """Test learn.""" - zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything = set_zero_shot_segment_anything(manual_config_update={"model.image_size": 4}) mocker.patch.object( zero_shot_segment_anything, "_predict_masks", @@ -179,12 +177,15 @@ def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: ) mocker.patch.object(zero_shot_segment_anything, "_generate_masked_features", return_value=torch.ones(1, 256)) - processed_prompts = {MockScoredLabel(label=0, name="label"): [{"box": torch.tensor([[0, 0, 1, 1]])}]} - zero_shot_segment_anything.learn( - images=torch.ones((1, 3, 4, 4)), - processed_prompts=processed_prompts, - original_size=torch.tensor((4, 4)), - ) + batch = [{ + "images": np.ones((4, 4, 3), dtype=np.uint8), + "gt_masks": np.ones((4, 4), dtype=np.uint8), + "bboxes": np.array([[0, 0, 1, 1]], dtype=np.float32), + "points": np.zeros((0, 2), dtype=np.float32), + "labels": {"bboxes": [MockScoredLabel(label=0, name="label")]}, + "original_size": np.array([4, 4], dtype=np.int64) + }] + zero_shot_segment_anything.learn(batch=batch, reset_feat=True) assert zero_shot_segment_anything.reference_info.reference_feats.shape == (1, 1, 256) assert zero_shot_segment_anything.reference_info.used_indices == torch.as_tensor([0]) @@ -198,18 +199,22 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect MockPromptGetter, ) - zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything = set_zero_shot_segment_anything(manual_config_update={"model.image_size": 4}) reference_feats = nn.Parameter(torch.rand(1, 1, 256), requires_grad=False) used_indices = nn.Parameter(torch.as_tensor([[0]], dtype=torch.int64), requires_grad=False) mocker.patch.object( SegmentAnything, "forward", return_value=(torch.ones(1, 4, 4, 4), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) ) + batch = [{ + "images": np.ones((4, 4, 3), dtype=np.uint8), + "gt_masks": np.ones((4, 4), dtype=np.uint8), + "original_size": np.array([4, 4], dtype=np.int64) + }] total_results = zero_shot_segment_anything.infer( - images=torch.ones((1, 3, 4, 4)), + batch=batch, reference_feats=reference_feats, used_indices=used_indices, - original_size=torch.tensor([[4, 4]], dtype=torch.int64) ) for i, results in enumerate(total_results[0]): @@ -332,28 +337,27 @@ def test_predict_masks(self, mocker, set_zero_shot_segment_anything) -> None: image_embeddings=torch.rand(1), point_coords=torch.rand(1, 2, 2), point_labels=torch.randint(low=0, high=2, size=(1, 2)), - original_size=torch.tensor([[8, 8]], dtype=torch.int64), + original_size=torch.tensor([8, 8], dtype=torch.int64), ) assert mask.shape == (8, 8) @e2e_pytest_unit def test_preprocess_prompts(self, set_zero_shot_segment_anything) -> None: - """Test _preprocess_prompts. - - TODO (sungchul) - - get inputs grouped as label and prompts - - use points and annotations. 
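[Editor's note: the updated tests above build batches in the new per-prompt-type layout ("bboxes", "points", and a "labels" dict keyed by prompt type), and the test below exercises the reworked _preprocess_prompts. As a quick orientation, here is a minimal, self-contained sketch of how a box and a point are rearranged into SAM-style point prompts; the tensor values are toy placeholders and only the reshape/label layout follows the patched _preprocess_prompts, nothing here is the exact production code.]

# Sketch only: a bounding box becomes a (1, 2, 2) point_coords tensor (its two
# corner points), a single point becomes (1, 2); values are placeholders.
import torch

bbox = torch.tensor([[0, 0, 1, 1]], dtype=torch.float32)   # x1, y1, x2, y2
point = torch.tensor([[2, 2]], dtype=torch.float32)

box_prompt = {
    "point_coords": bbox.reshape(-1, 2, 2),   # shape (1, 2, 2)
    "point_labels": torch.tensor([[1]]),
}
point_prompt = {
    "point_coords": point.reshape(-1, 2),     # shape (1, 2)
    "point_labels": torch.tensor([[1]]),
}

assert box_prompt["point_coords"].shape == (1, 2, 2)
assert point_prompt["point_coords"].shape == (1, 2)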
- """ + """Test _preprocess_prompts.""" zero_shot_segment_anything = set_zero_shot_segment_anything() - bboxes = [torch.tensor([0, 0, 1, 1])] - labels = [MockScoredLabel(label=1)] - processed_prompts = zero_shot_segment_anything._preprocess_prompts( - bboxes=bboxes, - labels=labels, - ) - - # processed_prompts = {labels[0]: [{"box": torch.tensor([[0, 0, 1, 1]])}]} - assert torch.equal(processed_prompts[labels[0]][0].get("box")[0], bboxes[0]) + transformed_batch = { + "bboxes": torch.tensor([[0, 0, 1, 1]]), + "points": torch.tensor([[2, 2]]), + "labels": {"bboxes": [MockScoredLabel(label=1)], "points": [MockScoredLabel(label=1)]} + } + processed_prompts = zero_shot_segment_anything._preprocess_prompts(transformed_batch) + + for prompts in processed_prompts.values(): + for prompt in prompts: + if "bboxes" in prompt: + prompt["bboxes"]["point_coords"].shape == (1, 2, 2) + elif "points" in prompt: + prompt["points"]["point_coords"].shape == (1, 1, 2) @e2e_pytest_unit def test_generate_masked_features(self, set_zero_shot_segment_anything) -> None: diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index d2a5852e0af..fd45f13f1fb 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -343,6 +343,8 @@ def test_export_to_onnx(self): }, "visual_prompting_prompt_getter": { "image_embeddings": np.random.randn(1, embed_dim, *embed_size).astype(dtype=np.float32), + "reference_feats": np.random.randn(1, 1, 256).astype(dtype=np.float32), + "used_indices": np.array([[0]], dtype=np.int64), "original_size": np.random.randint(low=0, high=image_size * 2, size=(1, 2), dtype=np.int64), "threshold": np.array([[0.1]], dtype=np.float32), "num_bg_points": np.random.randint(low=1, high=image_size, size=(1, 1), dtype=np.int64), @@ -379,7 +381,7 @@ def test_save_model(self, mocker): mocker_otx_model = mocker.patch("otx.api.entities.model.ModelEntity") mocker_io_bytes_io = mocker.patch("io.BytesIO") mocker_torch_save = mocker.patch("torch.save") - mocker.patch.object(self.zero_shot_task.model, "state_dict", return_value={"reference_info.reference_feats": None}) + mocker.patch.object(self.zero_shot_task.model, "state_dict", return_value={"reference_info.reference_feats": None, "reference_info.used_indices": None}) self.zero_shot_task.model.reference_info = "reference_info" From bdd7fc5a90b4edb1c4f5a548eff84891a666ecdd Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Fri, 16 Feb 2024 14:24:37 +0900 Subject: [PATCH 10/28] Update OV inference --- .../model_wrappers/openvino_models.py | 5 +- .../zero_shot_segment_anything.py | 19 +-- .../visual_prompting/tasks/inference.py | 6 +- .../visual_prompting/tasks/openvino.py | 92 +++++++---- .../model_wrappers/test_openvino_models.py | 22 ++- .../test_zero_shot_segment_anything.py | 8 +- .../visual_prompting/tasks/test_inference.py | 4 +- .../visual_prompting/tasks/test_openvino.py | 146 +++++++++++++++--- 8 files changed, 227 insertions(+), 75 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 128e92687e4..4861f6183be 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -62,6 +62,9 @@ class 
PromptGetter(ImageModel): """PromptGetter class for zero-shot visual prompting of openvino model wrapper.""" __model__ = "prompt_getter" + + def __init__(self, inference_adapter, configuration=None, preload=False): + super().__init__(inference_adapter, configuration, preload) @classmethod def parameters(cls) -> Dict[str, Any]: # noqa: D102 @@ -187,7 +190,7 @@ def sigmoid(x): blur_strength=self.blur_strength, ) - probability = max(min(float(outputs["iou_predictions"]), 1.0), 0.0) + probability = max(min(float(outputs["scores"]), 1.0), 0.0) meta["label"].probability = probability return hard_prediction, soft_prediction diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index ed902e2c4be..f33b8170132 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -8,7 +8,7 @@ import json from copy import deepcopy from itertools import product -import ast +import pickle from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union import torch @@ -49,7 +49,7 @@ def set_default_thresholds(self, default_threshold_reference: float, default_thr def forward( self, - image_embedding: Tensor, + image_embeddings: Tensor, reference_feats: Tensor, used_indices: Tensor, original_size: Tensor, @@ -57,7 +57,7 @@ def forward( num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), ) -> Tuple[Tensor, Tensor]: """Get prompt candidates.""" - device = image_embedding.device + device = image_embeddings.device original_size = original_size.squeeze() threshold = threshold.squeeze().to(device) num_bg_points = num_bg_points.squeeze() @@ -66,7 +66,7 @@ def forward( total_bg_coords: Tensor = torch.zeros(used_indices.max() + 1, num_bg_points, 2, device=device) for label in used_indices[0]: points_scores, bg_coords = self.get_prompt_candidates( - image_embedding=image_embedding, + image_embeddings=image_embeddings, reference_feat=reference_feats[label], original_size=original_size, threshold=threshold, @@ -88,7 +88,7 @@ def forward( def get_prompt_candidates( self, - image_embedding: Tensor, + image_embeddings: Tensor, reference_feat: Tensor, original_size: Tensor, threshold: Union[Tensor, float] = 0., @@ -96,7 +96,7 @@ def get_prompt_candidates( device: Union[torch.device, str] = torch.device("cpu"), ) -> Tuple[Tensor, Tensor]: """Get prompt candidates from given reference and target features.""" - target_feat = image_embedding.squeeze() + target_feat = image_embeddings.squeeze() c_feat, h_feat, w_feat = target_feat.shape target_feat = target_feat / target_feat.norm(dim=0, keepdim=True) target_feat = target_feat.reshape(c_feat, h_feat * w_feat) @@ -395,9 +395,9 @@ def infer( images = tb["images"].unsqueeze(0).to(self.device) original_size = torch.as_tensor(tb["original_size"]) - image_embedding = self.image_encoder(images) + image_embeddings = self.image_encoder(images) total_points_scores, total_bg_coords = self.prompt_getter( - image_embedding=image_embedding, + image_embeddings=image_embeddings, reference_feats=reference_feats, used_indices=used_indices, original_size=original_size) @@ -423,7 +423,7 @@ def infer( [1] + [0] * len(bg_coords), dtype=torch.float32, device=self.device ).unsqueeze(0) mask = 
self._predict_masks( - image_embeddings=image_embedding, + image_embeddings=image_embeddings, point_coords=point_coords, point_labels=point_labels, original_size=original_size, @@ -770,5 +770,6 @@ def training_epoch_end(self, outputs) -> None: path_reference_info = self.path_reference_info.format(datetime.now().strftime("%Y%m%d-%H%M%S")) os.makedirs(os.path.dirname(path_reference_info), exist_ok=True) torch.save(self.reference_info, path_reference_info) + pickle.dump({k: v.numpy() for k, v in self.reference_info.items()}, open(path_reference_info.replace(".pt", ".pickle"), "wb")) json.dump(repr(self.trainer.datamodule.train_dataset.dataset), open(path_reference_info.replace("reference_info.pt", "reference_meta.json"), "w")) logger.info(f"Saved reference info at {path_reference_info}") diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index f5033bfb99a..f9c705a2489 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -643,10 +643,10 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): elif module == "visual_prompting_prompt_getter": dummy_inputs = { "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float32), - "reference_feats": torch.randn(1, 1, 256, dtype=torch.float32), - "used_indices": torch.as_tensor([[0]], dtype=torch.int64), + "reference_feats": torch.randn(2, 1, 256, dtype=torch.float32), + "used_indices": torch.tensor([[0, 1]], dtype=torch.int64), "original_size": torch.randint(low=0, high=image_size * 2, size=(1, 2), dtype=torch.int64), - "threshold": torch.as_tensor([[0.1]], dtype=torch.float32), + "threshold": torch.tensor([[0.1]], dtype=torch.float32), "num_bg_points": torch.randint(low=1, high=image_size, size=(1, 1), dtype=torch.int64), } output_names = ["total_points_scores", "total_bg_coords"] diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index d550f1fb33d..f55e6f6bdbd 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -15,6 +15,7 @@ # and limitations under the License. import io +import pickle import json import os import random @@ -206,10 +207,6 @@ def await_all(self) -> None: self.model["image_encoder"].await_all() self.model["decoder"].await_all() - def pre_process_prompt_getter(self, *args, **kwargs) -> Any: - """Pre-process function of OpenVINO Zero-shot VIsual Prompting Inferencer for prompt getter.""" - pass - class OpenVINOZeroShotVisualPromptingInferencer(OpenVINOVisualPromptingInferencer): """Inferencer implementation for Zero-shot Visual Prompting using OpenVINO backend. 
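[Editor's note: the training_epoch_end change above writes the reference info both as a torch checkpoint and as a pickled dict of numpy arrays, and the OpenVINO zero-shot inferencer later loads the newest pickle from the same timestamped directory. The following is a self-contained sketch of that round trip. The directory template and the "reference_feats"/"used_indices" keys are taken from these patches; the helper names save_reference_info/load_latest_reference_info are hypothetical, and the array shapes just mirror the dummy ONNX-export inputs.]

# Sketch of the save/load convention, using dummy arrays (nothing model-specific).
import os
import pickle
from datetime import datetime

import numpy as np

PATH_TEMPLATE = "vpm_zsl_reference_infos/{}/reference_info.pickle"

def save_reference_info(reference_feats: np.ndarray, used_indices: np.ndarray) -> str:
    # one timestamped directory per training run
    path = PATH_TEMPLATE.format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump({"reference_feats": reference_feats, "used_indices": used_indices}, f)
    return path

def load_latest_reference_info():
    # timestamps sort lexicographically, so the first entry is the newest run
    stamps = sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)
    if not stamps:
        return None
    with open(PATH_TEMPLATE.format(stamps[0]), "rb") as f:
        return pickle.load(f)

save_reference_info(np.zeros((2, 1, 256), dtype=np.float32), np.array([[0, 1]], dtype=np.int64))
info = load_latest_reference_info()
assert info is not None and info["reference_feats"].shape == (2, 1, 256)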
@@ -291,34 +288,45 @@ def __init__( self.point_labels_box = np.array([[2, 3]], dtype=np.float32) self.has_mask_inputs = [np.array([[0.0]]), np.array([[1.0]])] + + self.reference_feats = None + self.used_indices = None + self.path_reference_info = "vpm_zsl_reference_infos/{}/reference_info.pickle" - def pre_process( # type: ignore - self, dataset_item: DatasetItemEntity, extra_processing: bool = False - ) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: + def pre_process_image_encoder(self, inputs: np.ndarray, extra_processing: bool = False) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for image encoder.""" - return self.model["image_encoder"].preprocess(dataset_item.numpy, extra_processing) + return self.model["image_encoder"].preprocess(inputs, extra_processing) def pre_process_prompt_getter( - self, image_embeddings: Dict[str, np.ndarray], original_size: np.ndarray + self, + image_embeddings: Dict[str, np.ndarray], + reference_feats: np.ndarray, + used_indices: np.ndarray, + original_size: np.ndarray, ) -> Dict[str, np.ndarray]: """Pre-process function of OpenVINO Zero-shot VIsual Prompting Inferencer for prompt getter.""" inputs_prompt_getter = { "original_size": original_size[None], - "threshold": np.array([[self.model["prompt_getter"].sim_threshold]], dtype=np.float32), - "num_bg_points": np.array([[self.model["prompt_getter"].num_bg_points]], dtype=np.int64), + "reference_feats": reference_feats, + "used_indices": used_indices, + "threshold": np.asarray([[self.model["prompt_getter"].sim_threshold]], dtype=np.float32), + "num_bg_points": np.asarray([[self.model["prompt_getter"].num_bg_points]], dtype=np.int64), } inputs_prompt_getter.update(image_embeddings) return inputs_prompt_getter - - def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore + + def learn(self, images: np.ndarray): + """Learn""" + + def infer(self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray) -> List: """Perform a prediction for a given input image.""" # forward image encoder - images, meta = self.pre_process(dataset_item) - original_size = np.array(meta["original_shape"][:2], dtype=np.int64) + images, meta = self.pre_process_image_encoder(images) + original_size = np.asarray(meta["original_shape"][:2], dtype=np.int64) image_embeddings = self.forward_image_encoder(images) # get point candidates - inputs_prompt_getter = self.pre_process_prompt_getter(image_embeddings, original_size) + inputs_prompt_getter = self.pre_process_prompt_getter(image_embeddings, reference_feats, used_indices, original_size) total_prompts = self.forward_prompt_getter(inputs_prompt_getter) annotations: DefaultDict = defaultdict(list) @@ -343,10 +351,14 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: point_coords = np.concatenate((np.array([[x, y]]), bg_coords), axis=0, dtype=np.float32) point_coords = self.model["decoder"]._apply_coords(point_coords, original_size) point_labels = np.array([1] + [0] * len(bg_coords), dtype=np.float32) - inputs_decoder = {"point_coords": point_coords[None], "point_labels": point_labels[None]} + inputs_decoder = { + "point_coords": point_coords[None], + "point_labels": point_labels[None], + "orig_size": original_size[None]} inputs_decoder.update(image_embeddings) prediction = self.forward_decoder(inputs_decoder, original_size) + prediction.update({"scores": points_score[-1]}) metadata = { "label": [_label for _label in self.labels if 
int(_label.id_) == label][0], "original_size": original_size[None], @@ -357,8 +369,14 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: annotations[label].extend(annotation) predicted_masks[label].append(hard_prediction) used_points[label].append(points_score) - self.__inspect_overlapping_areas(predicted_masks, used_points, annotations) - return sum(annotations.values(), []) + self._inspect_overlapping_areas(predicted_masks, used_points, annotations) + return sum(annotations.values(), []), predicted_masks, used_points + + def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore + """Perform a prediction for a given input image.""" + self._get_reference_info() + results = self.infer(dataset_item.numpy, self.reference_feats, self.used_indices) + return results[0] def forward_prompt_getter(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" @@ -370,7 +388,6 @@ def forward_decoder( # type: ignore """Forward function of OpenVINO Visual Prompting Inferencer.""" logits: np.ndarray scores: np.ndarray - mask_slice = slice(0, 1) for i in range(3): if i == 0: # First-step prediction @@ -383,7 +400,7 @@ def forward_decoder( # type: ignore # Cascaded Post-refinement-1 mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) if masks.sum() == 0: - return {"masks": masks} + return {"upscaled_masks": masks} has_mask_input = self.has_mask_inputs[1] @@ -391,7 +408,7 @@ def forward_decoder( # type: ignore # Cascaded Post-refinement-2 mask_input, masks = self._postprocess_masks(masks, logits, scores) if masks.sum() == 0: - return {"masks": masks} + return {"upscaled_masks": masks} has_mask_input = self.has_mask_inputs[1] y, x = np.nonzero(masks) @@ -409,14 +426,12 @@ def forward_decoder( # type: ignore masks = upscaled_masks > self.model["decoder"].mask_threshold _, masks = self._postprocess_masks(masks, logits, scores) - return {"masks": masks} + return {"upscaled_masks": masks} def _postprocess_masks( self, masks: np.ndarray, logits: np.ndarray, scores: np.ndarray, is_single: bool = False ) -> Tuple[np.ndarray, ...]: """Post-process logits for resized masks according to best index based on scores.""" - masks = masks.transpose(2, 0, 1)[None] - if is_single: best_idx = 0 else: @@ -436,14 +451,14 @@ def _postprocess_masks( best_idx = np.argmax(scores[0]) return logits[:, [best_idx]], masks[0, best_idx] - def __inspect_overlapping_areas( + def _inspect_overlapping_areas( self, predicted_masks: Dict[int, List[np.ndarray]], used_points: Dict[int, List[np.ndarray]], annotations: Dict[int, List[np.ndarray]], threshold_iou: float = 0.8, ): - def __calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): + def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): assert mask1.ndim == 2 and mask2.ndim == 2 intersection = np.logical_and(mask1, mask2).sum().item() union = np.logical_or(mask1, mask2).sum().item() @@ -461,21 +476,36 @@ def __calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): overlapped_label = [] overlapped_other_label = [] for (im, mask), (jm, other_mask) in product(enumerate(masks), enumerate(other_masks)): - if __calculate_mask_iou(mask, other_mask) > threshold_iou: + if _calculate_mask_iou(mask, other_mask) > threshold_iou: if used_points[label][im][2] > used_points[other_label][jm][2]: overlapped_other_label.append(jm) else: overlapped_label.append(im) - for im in overlapped_label[::-1]: + for im in 
sorted(list(set(overlapped_label)), reverse=True): masks.pop(im) used_points[label].pop(im) annotations[label].pop(im) - for jm in overlapped_other_label[::-1]: + for jm in sorted(list(set(overlapped_other_label)), reverse=True): other_masks.pop(jm) used_points[other_label].pop(jm) annotations[other_label].pop(jm) + + def _find_latest_reference_info(self) -> Union[str, None]: + """Find latest reference info to be used.""" + if len(stamps := sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)) > 0: + return stamps[0] + return None + + def _get_reference_info(self) -> None: + """Get reference info through loading previously saved one or running `learn`.""" + if self.reference_feats is None and self.used_indices is None and (latest_stamp := self._find_latest_reference_info()) is not None: + # load previously saved reference info + latest_reference_info = self.path_reference_info.format(latest_stamp) + reference_info = pickle.load(open(latest_reference_info, "rb")) + self.reference_feats = reference_info["reference_feats"] + self.used_indices = reference_info["used_indices"] class OTXOpenVinoDataLoader: @@ -562,7 +592,7 @@ def __getitem__(self, index: int) -> Dict[str, Any]: items = self.dataset[index] images, meta = self.inferencer.pre_process(items, extra_processing=True) # type: ignore - original_size = np.array(meta["original_shape"][:2]) + original_size = np.asarray(meta["original_shape"][:2]) _, _, h, w = images["images"].shape pad_width = ((0, 0), (0, 0), (0, self.target_length - h), (0, self.target_length - w)) images["images"] = np.pad(images["images"], pad_width, mode="constant", constant_values=0) diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index e6ba00166d0..1518dbd6131 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -56,6 +56,26 @@ def test_parameters(self): assert params.get("sim_threshold").default_value == 0.5 assert params.get("num_bg_points").default_value == 1 + + @e2e_pytest_unit + def test_get_inputs(self, mocker): + """Test _get_inputs.""" + mocker.patch.object(ImageModel, "__init__") + prompt_getter = PromptGetter("adapter") + + prompt_getter.inputs = { + "image_embeddings": np.ones((1, 4, 4, 3)), + "reference_feats": np.ones((2, 1, 256)), + "used_indices": np.array([[0, 1]], dtype=np.int64), + "original_size": np.array([[4, 4]], dtype=np.int64), + "threshold": np.array([[0.1]]), + "num_bg_points": np.array([[1]], dtype=np.int64), + } + + returned_value = prompt_getter._get_inputs() + + assert returned_value[0] == ["image_embeddings"] + assert returned_value[1] == ["reference_feats", "used_indices", "original_size", "threshold", "num_bg_points"] class TestDecoder: @@ -162,7 +182,7 @@ def test_postprocess(self, mocker): self.decoder.output_blob_name = "masks" self.decoder.soft_threshold = 0.5 self.decoder.blur_strength = 2 - fake_output = {"masks": np.ones((4, 4)), "iou_predictions": 0.1} + fake_output = {"masks": np.ones((4, 4)), "scores": 0.1} fake_metadata = {"original_size": np.array([[6, 6]]), "label": mocker.Mock(spec=LabelEntity)} returned_value = self.decoder.postprocess(outputs=fake_output, meta=fake_metadata) diff --git 
a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index cba6b616aa3..61a8b3cd7de 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -52,13 +52,13 @@ def test_forward(self, mocker, result_point_selection: torch.Tensor) -> None: self.prompt_getter, "get_prompt_candidates", return_value=(result_point_selection, torch.zeros(1, 2))) - image_embedding = torch.ones(1, 4, 4, 4) + image_embeddings = torch.ones(1, 4, 4, 4) reference_feats = torch.rand(1, 1, 4) used_indices = torch.as_tensor([[0]]) original_size = torch.tensor((self.prompt_getter.image_size, self.prompt_getter.image_size), dtype=torch.int64) total_points_scores, total_bg_coords = self.prompt_getter( - image_embedding=image_embedding, reference_feats=reference_feats, used_indices=used_indices, original_size=original_size + image_embeddings=image_embeddings, reference_feats=reference_feats, used_indices=used_indices, original_size=original_size ) assert total_points_scores.shape[0] == 1 @@ -72,14 +72,14 @@ def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tenso "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" ) mocker.patch.object(self.prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2))) - image_embedding = torch.ones(1, 4, 4, 4) + image_embeddings = torch.ones(1, 4, 4, 4) reference_feat = torch.rand(1, 4) original_size = torch.tensor( [[self.prompt_getter.image_size, self.prompt_getter.image_size]], dtype=torch.int64 ) points_scores, bg_coords = self.prompt_getter.get_prompt_candidates( - image_embedding=image_embedding, reference_feat=reference_feat, original_size=original_size + image_embeddings=image_embeddings, reference_feat=reference_feat, original_size=original_size ) assert torch.all(points_scores == result_point_selection) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index fd45f13f1fb..59c5fa37215 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -343,8 +343,8 @@ def test_export_to_onnx(self): }, "visual_prompting_prompt_getter": { "image_embeddings": np.random.randn(1, embed_dim, *embed_size).astype(dtype=np.float32), - "reference_feats": np.random.randn(1, 1, 256).astype(dtype=np.float32), - "used_indices": np.array([[0]], dtype=np.int64), + "reference_feats": np.random.randn(2, 1, 256).astype(dtype=np.float32), + "used_indices": np.array([[0, 1]], dtype=np.int64), "original_size": np.random.randint(low=0, high=image_size * 2, size=(1, 2), dtype=np.int64), "threshold": np.array([[0.1]], dtype=np.float32), "num_bg_points": np.random.randint(low=1, high=image_size, size=(1, 1), dtype=np.int64), diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 459aba889f5..bf791571efd 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ 
b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -193,24 +193,24 @@ def setup(self, mocker): visual_prompting_hparams = self.task_environment.get_hyper_parameters(VisualPromptingBaseConfig) label_schema = self.task_environment.label_schema - self.visual_prompting_ov_inferencer = OpenVINOZeroShotVisualPromptingInferencer( + self.zero_shot_visual_prompting_ov_inferencer = OpenVINOZeroShotVisualPromptingInferencer( visual_prompting_hparams, label_schema, {"image_encoder": "", "prompt_getter": "", "decoder": ""}, {"image_encoder": "", "prompt_getter": "", "decoder": ""}, ) - self.visual_prompting_ov_inferencer.model["decoder"] = mocker.patch( + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"] = mocker.patch( "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", autospec=True ) - self.visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.3 - self.visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.3 + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) @e2e_pytest_unit def test_predict(self, mocker): """Test predict.""" mocker_pre_process = mocker.patch.object( OpenVINOZeroShotVisualPromptingInferencer, - "pre_process", + "pre_process_image_encoder", return_value=(torch.zeros((1, 3, 2, 2)), {"original_shape": (4, 4, 1)}), ) mocker_forward = mocker.patch.object( @@ -223,15 +223,13 @@ def test_predict(self, mocker): "forward_prompt_getter", return_value={"total_points_scores": np.array([[[1, 1, 1]]]), "total_bg_coords": np.array([[[2, 2]]])}, ) - mocker_forward_decoder = mocker.patch.object( - OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value=None - ) + mocker_forward_decoder = mocker.patch.object(OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value={}) mocker_post_process = mocker.patch.object( OpenVINOZeroShotVisualPromptingInferencer, "post_process", return_value=(self.fake_annotation, None, None) ) fake_input = mocker.Mock(spec=DatasetItemEntity) - returned_value = self.visual_prompting_ov_inferencer.predict(fake_input) + returned_value = self.zero_shot_visual_prompting_ov_inferencer.predict(fake_input) mocker_pre_process.assert_called_once() mocker_forward.assert_called_once() @@ -246,12 +244,12 @@ def test_predict(self, mocker): ( (np.ones((1, 1)), np.ones((3, 3))), {"upscaled_masks": np.ones((3, 3)), "iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, - {"masks": np.ones((3, 3))}, + {"upscaled_masks": np.ones((3, 3))}, ), ( (np.zeros((2, 2)), np.zeros((3, 3))), {"upscaled_masks": np.zeros((3, 3)), "iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, - {"masks": np.zeros((3, 3))}, + {"upscaled_masks": np.zeros((3, 3))}, ), ], ) @@ -264,16 +262,16 @@ def test_forward_decoder( ): """Test forward_decoder.""" mocker.patch.object( - self.visual_prompting_ov_inferencer.model["decoder"], "infer_sync", return_value=infer_sync_output + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"], "infer_sync", return_value=infer_sync_output ) mocker.patch.object( - self.visual_prompting_ov_inferencer.model["decoder"], + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"], "_apply_coords", return_value=np.array([[[1, 1]]], dtype=np.float32), ) - mocker.patch.object(self.visual_prompting_ov_inferencer, "_postprocess_masks", 
return_value=postprocess_output) + mocker.patch.object(self.zero_shot_visual_prompting_ov_inferencer, "_postprocess_masks", return_value=postprocess_output) - result = self.visual_prompting_ov_inferencer.forward_decoder( + result = self.zero_shot_visual_prompting_ov_inferencer.forward_decoder( inputs={ "image_embeddings": np.empty((1, 4, 2, 2)), "point_coords": np.array([[[1, 1]]], dtype=np.float32), @@ -282,39 +280,139 @@ def test_forward_decoder( original_size=np.array([3, 3]), ) - assert np.all(result["masks"] == expected["masks"]) + assert np.all(result["upscaled_masks"] == expected["upscaled_masks"]) @e2e_pytest_unit @pytest.mark.parametrize( "masks,expected_masks", [ ( - np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[..., None], 4, axis=-1), + np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[None], 4, axis=0)[None], np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_) ), ( np.concatenate( ( - np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[..., None], 3, axis=-1), - np.zeros((3, 3, 1)), + np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[None], 3, axis=0)[None], + np.zeros((1, 1, 3, 3)), ), - axis=-1, + axis=1, ), np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_) ), - (np.zeros((3, 3, 4)), np.zeros((3, 3))), + (np.zeros((1, 4, 3, 3)), np.zeros((3, 3))), ], ) def test_postprocess_masks(self, masks: np.ndarray, expected_masks: np.ndarray): """Test _postprocess_masks.""" - self.visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.0 - self.visual_prompting_ov_inferencer.model["decoder"].image_size = 3 + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.0 + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].image_size = 3 - _, result_masks = self.visual_prompting_ov_inferencer._postprocess_masks( + _, result_masks = self.zero_shot_visual_prompting_ov_inferencer._postprocess_masks( masks=masks, logits=np.empty((1, 4, 2, 2)), scores=np.array([[0.5, 0.7, 0.8, 0.9]])) assert result_masks.shape == (3, 3) assert np.all(result_masks == expected_masks) + + @e2e_pytest_unit + def test_inspect_overlapping_areas(self) -> None: + """Test _inspect_overlapping_areas.""" + predicted_masks = { + 0: [ + np.array( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + ], + ), + ], + 1: [ + np.array( + [ + [0, 0, 0, 1, 1, 0], + [0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + ], + ), + np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + ], + ), + np.array( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + ), + ], + } + used_points = { + 0: [ + np.array([0, 0, 0.5]), # to be removed + np.array([2, 2, 0.5]), + np.array([1, 4, 0.5]), + ], + 1: [ + np.array([3, 0, 0.5]), + np.array([4, 4, 0.5]), + np.array([1, 4, 0.3]), # to 
be removed + np.array([0, 0, 0.7]), + ], + } + + self.zero_shot_visual_prompting_ov_inferencer._inspect_overlapping_areas(predicted_masks, used_points, predicted_masks.copy(), threshold_iou=0.5) + + assert len(predicted_masks[0]) == 1 + assert len(predicted_masks[1]) == 2 + assert all(np.array([2, 2, 0.5]) == used_points[0][0]) + assert all(np.array([0, 0, 0.7]) == used_points[1][2]) class TestOTXOpenVinoDataLoader: From 358d97b88acaf723da36661946da81c2779ee8ce Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Fri, 16 Feb 2024 22:32:41 +0900 Subject: [PATCH 11/28] Fix for unit & intg tests --- .../datasets/pipelines/sam_transforms.py | 3 +- .../zero_shot_segment_anything.py | 98 +++++-------------- .../visual_prompting/tasks/openvino.py | 1 + .../test_zero_shot_segment_anything.py | 26 ----- .../visual_prompting/tasks/test_openvino.py | 2 +- .../visual_prompting/test_helpers.py | 7 +- 6 files changed, 36 insertions(+), 101 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py index a47efc97540..63a58b9229e 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py @@ -36,7 +36,8 @@ def __call__(self, item: Dict[str, Union[List, Tensor]]) -> Dict[str, Union[List item["images"] = torch.as_tensor( self.apply_image(item["images"], self.target_length).transpose((2, 0, 1)), dtype=torch.get_default_dtype() ) - item["gt_masks"] = [torch.as_tensor(gt_mask) for gt_mask in item["gt_masks"]] + if "gt_masks" in item: + item["gt_masks"] = [torch.as_tensor(gt_mask) for gt_mask in item["gt_masks"]] if "bboxes" in item: item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"], self.target_length) if "points" in item: diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index f33b8170132..ebdd7ae3783 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -64,7 +64,8 @@ def forward( total_points_scores: Tensor = torch.zeros(used_indices.max() + 1, 0, 3, device=device) total_bg_coords: Tensor = torch.zeros(used_indices.max() + 1, num_bg_points, 2, device=device) - for label in used_indices[0]: + for i in range(len(used_indices[0])): + label = used_indices[0][i] points_scores, bg_coords = self.get_prompt_candidates( image_embeddings=image_embeddings, reference_feat=reference_feats[label], @@ -158,7 +159,7 @@ def _point_selection( matched_grid = fg_coords_scores.unsqueeze(1) * matched_matrix.unsqueeze(-1) # sample the highest score one of the samples that are in the same grid - points_scores = matched_grid[matched_grid[..., -1].argsort(dim=0, descending=True)[0]].diagonal().T + points_scores = matched_grid[matched_grid[..., -1].topk(k=1, dim=0, largest=True)[1][0]].diagonal().T # sort by the highest score points_scores = points_scores[torch.argsort(points_scores[:, -1], descending=True)] @@ -228,6 +229,10 @@ def set_default_config(self) -> DictConfig: "freeze_prompt_encoder": 
True, "image_size": 1024, "mask_threshold": 0.0, + "return_single_mask": False, + "use_stability_score": False, + "stability_score_offset": 1., + "return_extra_metrics": False, }, "dataset": { "normalize": { @@ -289,7 +294,7 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ processed_prompts = [self._preprocess_prompts(tb) for tb in transformed_batch] # initialize tensors to contain reference features and prompts - largest_label = max([int(label.id) for pp in processed_prompts for label in pp.keys()]) + largest_label = max([label for pp in processed_prompts for label in pp.keys()]) self.expand_reference_info(largest_label) # TODO(sungchul): consider who to handle multiple reference features, currently replace it @@ -304,10 +309,7 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ ref_masks = torch.zeros(largest_label + 1, *map(int, original_size)) for label, input_prompts in pp.items(): - if label.name.lower() == "background": - # skip background - # TODO (sungchul): how to skip background class - continue + # TODO (sungchul): how to skip background class # generate reference mask # TODO (sungchul): ensemble multi reference features (current : use merged masks) @@ -356,9 +358,9 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ ref_feat = self._generate_masked_features(processed_embedding, ref_mask, default_threshold_reference) default_threshold_reference -= 0.05 - self.reference_info["reference_feats"][int(label.id)] = ref_feat.detach().cpu() - self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([[int(label.id)]])), dim=1), requires_grad=False) - ref_masks[int(label.id)] = ref_mask.detach().cpu() + self.reference_info["reference_feats"][label] = ref_feat.detach().cpu() + self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([[label]])), dim=1), requires_grad=False) + ref_masks[label] = ref_mask.detach().cpu() batch_ref_masks.append(ref_masks) return self.reference_info, batch_ref_masks @@ -366,8 +368,8 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ def infer( self, batch: List[Dict[str, Any]], - reference_feats: Tensor, - used_indices: Tensor, + reference_feats: Union[np.ndarray, Tensor], + used_indices: Union[np.ndarray, Tensor], is_cascade: bool = False, ) -> List[List[DefaultDict[int, List[Tensor]]]]: """Zero-shot inference with reference features. @@ -376,8 +378,8 @@ def infer( Args: batch (List[Dict[str, Any]]): List of dictionaries containing images and metas. - reference_feats (Tensor): Reference features for target prediction. - used_indices (Tensor): To check which indices of reference features are validate. + reference_feats (Union[np.ndarray, Tensor]): Reference features for target prediction. If it is np.ndarray, it will be converted to torch tensor. + used_indices (Union[np.ndarray, Tensor]): To check which indices of reference features are validate. If it is np.ndarray, it will be converted to torch tensor. is_cascade (bool): Whether use cascade inference. Defaults to False. Returns: @@ -386,6 +388,11 @@ def infer( 1. Target images 2. 
Tuple of predicted masks and used points gotten by point selection """ + if isinstance(reference_feats, np.ndarray): + reference_feats = torch.as_tensor(reference_feats, device=self.device) + if isinstance(used_indices, np.ndarray): + used_indices = torch.as_tensor(used_indices, device=self.device) + # preprocess images and prompts transformed_batch = [self.transforms(b.copy()) for b in batch] @@ -537,7 +544,7 @@ def predict_step(self, batch, batch_idx): results = self.infer(batch, self.reference_info["reference_feats"], self.reference_info["used_indices"]) return [result[0] for result in results] # tmp: only mask - def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[ScoredLabel, List[Dict[str, Tensor]]]: + def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[int, List[Dict[str, Tensor]]]: """Preprocess prompts. Currently, preprocessing for bounding boxes is only supported. @@ -546,7 +553,7 @@ def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[ScoredLabel, List[D batch (Dict[str, Any]): Dictionary containing data and prompts information. Returns: - (defaultdict[ScoredLabel, List[Dict[str, Tensor]]]): Processed and arranged each single prompt + (defaultdict[int, List[Dict[str, Tensor]]]): Processed and arranged each single prompt using label information as keys. Unlike other prompts, `annotation` prompts will be aggregated as single annotation. """ @@ -557,6 +564,8 @@ def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[ScoredLabel, List[D if prompts is None or len(prompts) == 0: continue for prompt, label in zip(prompts, labels): + if isinstance(label, ScoredLabel): + label = int(label.id_) # TODO (sungchul): revisit annotations and polygons if prompt_name == "annotations": processed_prompts[label].append({prompt_name: torch.as_tensor(prompt, device=self.device)}) @@ -581,7 +590,7 @@ def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[ScoredLabel, List[D "point_labels": torch.tensor([[1]], device=self.device), }}) - processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x[0].id_)) + processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x)) return processed_prompts def _preprocess_coords( @@ -684,60 +693,6 @@ def _postprocess_masks( best_idx = torch.argmax(scores[0]) return logits[:, best_idx], masks[0, best_idx] - def _update_value(self, target: Dict[str, Any], key: str, value: Tensor) -> None: - """Update tensor to target dictionary. - - Args: - target (Dict[str, Any]): Target dictionary to be updated. - key (str): Key to be used for update. - value (Tensor): Value to be used for update. - """ - if key in target: - target[key] = torch.cat((target[key], value)) - else: - target[key] = value - - def _merge_prompts( - self, - label: ScoredLabel, - input_prompts: Dict[str, Tensor], - processed_prompts: Dict[ScoredLabel, List[Dict[str, Tensor]]], - use_only_background: bool = True, - ) -> Dict[str, Tensor]: - """Merge target prompt and other prompts. - - Merge a foreground prompt and other prompts (background or prompts with other classes). - - Args: - label (ScoredLabel): Label information. Background is 0 and other foregrounds are >= 0. - input_prompts (Dict[str, Tensor]): A foreground prompt to be merged with other prompts. - processed_prompts (Dict[ScoredLabel, List[Dict[str, Tensor]]]): The whole class-wise prompts - processed at _preprocess_prompts. - use_only_background (bool): Whether merging only background prompt, defaults to True. - It is applied to only point_coords. 
- - Returns: - (Dict[str, Tensor]): Merged prompts. - """ - merged_input_prompts = deepcopy(input_prompts) - for other_label, other_input_prompts in processed_prompts.items(): - if other_label.id_ == label.id_: - continue - if (use_only_background and other_label.id_ == 0) or (not use_only_background): - # only add point (and scribble) prompts - # use_only_background=True -> background prompts are only added as background - # use_only_background=False -> other prompts are added as background - for other_input_prompt in other_input_prompts: - if "point_coords" in other_input_prompt: - # point, scribble - self._update_value(merged_input_prompts, "point_coords", other_input_prompt.get("point_coords")) - self._update_value( - merged_input_prompts, - "point_labels", - torch.zeros_like(other_input_prompt.get("point_labels")), - ) - return merged_input_prompts - def set_metrics(self) -> None: """Skip set_metrics unused in zero-shot learning.""" pass @@ -766,6 +721,7 @@ def on_predict_start(self) -> None: def training_epoch_end(self, outputs) -> None: """Called in the training loop at the very end of the epoch.""" + self.reference_info["used_indices"] = Parameter(self.reference_info["used_indices"].unique().unsqueeze(0), requires_grad=False) if self.config.model.save_outputs: path_reference_info = self.path_reference_info.format(datetime.now().strftime("%Y%m%d-%H%M%S")) os.makedirs(os.path.dirname(path_reference_info), exist_ok=True) diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index f55e6f6bdbd..1a293313ced 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -185,6 +185,7 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: # forward decoder to get predicted mask prediction = self.forward_decoder(prompt) + prediction["scores"] = prediction["iou_predictions"] metadata = {"label": label} # set annotation for eval diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 61a8b3cd7de..e48c8b88e03 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -404,29 +404,3 @@ def test_postprocess_masks( _, result = zero_shot_segment_anything._postprocess_masks(masks, logits, scores) assert torch.equal(result, expected) - - @e2e_pytest_unit - @pytest.mark.parametrize("use_only_background", [True, False]) - def test_merge_prompts(self, set_zero_shot_segment_anything, use_only_background: bool) -> None: - """Test _merge_prompts.""" - zero_shot_segment_anything = set_zero_shot_segment_anything() - - input_prompts = {"point_coords": torch.tensor([1]), "point_labels": torch.tensor([1])} - processed_prompts = { - MockScoredLabel(label=0): [{"point_coords": torch.tensor([0]), "point_labels": torch.tensor([0])}], - MockScoredLabel(label=2): [{"point_coords": torch.tensor([2]), "point_labels": torch.tensor([1])}], - } - - merged_input_prompts = zero_shot_segment_anything._merge_prompts( - label=MockScoredLabel(label=1), - input_prompts=input_prompts, - processed_prompts=processed_prompts, - 
use_only_background=use_only_background, - ) - - if use_only_background: - assert torch.equal(merged_input_prompts.get("point_coords"), torch.tensor([1, 0])) - assert torch.equal(merged_input_prompts.get("point_labels"), torch.tensor([1, 0])) - else: - assert torch.equal(merged_input_prompts.get("point_coords"), torch.tensor([1, 0, 2])) - assert torch.equal(merged_input_prompts.get("point_labels"), torch.tensor([1, 0, 0])) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index bf791571efd..9196204dbaa 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -138,7 +138,7 @@ def test_predict(self, mocker): return_value={"image_embeddings": np.empty((4, 2, 2))}, ) mocker_forward_decoder = mocker.patch.object( - OpenVINOVisualPromptingInferencer, "forward_decoder", return_value=None + OpenVINOVisualPromptingInferencer, "forward_decoder", return_value={"iou_predictions": 0.1} ) mocker_post_process = mocker.patch.object( OpenVINOVisualPromptingInferencer, "post_process", return_value=(self.fake_annotation, None, None) diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index ac16b4cea4e..5775c7712de 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -17,6 +17,8 @@ AnnotationSceneEntity, AnnotationSceneKind, ) +from unittest.mock import Mock +from otx.api.entities.scored_label import ScoredLabel from otx.api.entities.color import Color from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.datasets import DatasetEntity @@ -184,8 +186,9 @@ def predict_mask(self, *args, **kwargs): class MockScoredLabel: def __init__(self, label: int, name: str = "background"): self.name = name - self.id_ = label - self.id = label + self.label = Mock() + self.label.id_ = label + self.__class__ = ScoredLabel class MockPromptGetter(nn.Module): From cc60bb7fa432166ce66ac7d84b4519916e3975e7 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Fri, 16 Feb 2024 23:06:20 +0900 Subject: [PATCH 12/28] precommit --- .../model_wrappers/openvino_models.py | 10 +- .../pytorch_lightning/datasets/dataset.py | 51 ++++-- .../models/encoders/sam_image_encoder.py | 3 +- .../visual_prompters/segment_anything.py | 28 ++- .../zero_shot_segment_anything.py | 173 +++++++++++------- .../visual_prompting/tasks/inference.py | 6 +- .../visual_prompting/tasks/openvino.py | 109 +++++++---- .../model_wrappers/test_openvino_models.py | 55 +++--- .../callbacks/test_inference_callback.py | 12 +- .../datasets/pipelines/test_transforms.py | 135 +++++++------- .../models/encoders/test_sam_image_encoder.py | 5 +- .../visual_prompters/test_segment_anything.py | 6 +- .../test_zero_shot_segment_anything.py | 90 +++++---- .../visual_prompting/tasks/test_inference.py | 6 +- .../visual_prompting/tasks/test_openvino.py | 33 +++- 15 files changed, 428 insertions(+), 294 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 4861f6183be..816e7db118a 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -62,7 +62,7 
@@ class PromptGetter(ImageModel): """PromptGetter class for zero-shot visual prompting of openvino model wrapper.""" __model__ = "prompt_getter" - + def __init__(self, inference_adapter, configuration=None, preload=False): super().__init__(inference_adapter, configuration, preload) @@ -73,7 +73,7 @@ def parameters(cls) -> Dict[str, Any]: # noqa: D102 parameters.update({"sim_threshold": NumericalValue(value_type=float, default_value=0.5, min=0, max=1)}) parameters.update({"num_bg_points": NumericalValue(value_type=int, default_value=1, min=0, max=1024)}) return parameters - + def _get_inputs(self): """Defines the model inputs for images and additional info.""" image_blob_names, image_info_blob_names = [], [] @@ -83,9 +83,7 @@ def _get_inputs(self): else: image_info_blob_names.append(name) if not image_blob_names: - self.raise_error( - "Failed to identify the input for the image: no 4D input layer found" - ) + self.raise_error("Failed to identify the input for the image: no 4D input layer found") return image_blob_names, image_info_blob_names @@ -101,7 +99,7 @@ def __init__( preload: bool = False, ): super().__init__(model_adapter, configuration, preload) - + self.mask_input = np.zeros((1, 1, 256, 256), dtype=np.float32) self.has_mask_input = np.zeros((1, 1), dtype=np.float32) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py index 83ada16d9c6..dcf336776b1 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py @@ -14,7 +14,8 @@ # See the License for the specific language governing permissions # and limitations under the License. -from typing import Any, Dict, List, Optional, Union +from collections import defaultdict +from typing import Any, DefaultDict, Dict, List, Optional, Union import cv2 import numpy as np @@ -31,7 +32,6 @@ ResizeLongestSide, collate_fn, ) -from collections import defaultdict from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.datasets import DatasetEntity from otx.api.entities.image import Image @@ -158,7 +158,7 @@ def __init__( self.transform = get_transform(image_size, mean, std) self.offset_bbox = offset_bbox self.labels = dataset.get_labels() - + if not use_bbox and not use_point: # if both are False, use bbox as default use_bbox = True @@ -179,7 +179,12 @@ def __len__(self) -> int: return len(self.dataset) @staticmethod - def get_prompts(dataset_item: DatasetItemEntity, dataset_labels: List[LabelEntity], prob: float = 1., mode: Subset = Subset.TESTING) -> Dict[str, Any]: + def get_prompts( + dataset_item: DatasetItemEntity, + dataset_labels: List[LabelEntity], + prob: float = 1.0, + mode: Subset = Subset.TESTING, + ) -> Dict[str, Any]: """Get propmts from dataset_item. 
Args: @@ -195,7 +200,7 @@ def get_prompts(dataset_item: DatasetItemEntity, dataset_labels: List[LabelEntit bboxes: List[np.ndarray] = [] points: List[np.ndarray] = [] gt_masks: List[np.ndarray] = [] - labels: defaultdict[str, List[ScoredLabel]] = defaultdict(list) + labels: DefaultDict[str, List[ScoredLabel]] = defaultdict(list) for annotation in dataset_item.get_annotations(labels=dataset_labels, include_empty=False, preserve_id=True): if isinstance(annotation.shape, Image): # use mask as-is @@ -286,7 +291,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: prompts = self.get_prompts(dataset_item, self.labels, self.prob) item.update({**prompts, "path": dataset_item.media.path}) - + return item @@ -388,7 +393,7 @@ def setup(self, stage: Optional[str] = None) -> None: image_size=image_size, mean=mean, std=std, - **self.kwargs + **self.kwargs, ) def summary(self): @@ -402,58 +407,66 @@ def summary(self): num_items, ) - def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]: + def train_dataloader(self) -> DataLoader: """Train Dataloader. Returns: - Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]: Train dataloader. + DataLoader: Train dataloader. """ return DataLoader( self.train_dataset, shuffle=True, batch_size=self.config.train_batch_size, num_workers=self.config.num_workers, - collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, + collate_fn=collate_fn + if self.train_type != TrainType.Zeroshot + else lambda x: x, # type: ignore[return-value] ) - def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]: + def val_dataloader(self) -> DataLoader: """Validation Dataloader. Returns: - Union[DataLoader, List[DataLoader]]: Validation Dataloader. + DataLoader: Validation Dataloader. """ return DataLoader( self.val_dataset, shuffle=False, batch_size=self.config.val_batch_size, num_workers=self.config.num_workers, - collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, + collate_fn=collate_fn + if self.train_type != TrainType.Zeroshot + else lambda x: x, # type: ignore[return-value] ) - def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]: + def test_dataloader(self) -> DataLoader: """Test Dataloader. Returns: - Union[DataLoader, List[DataLoader]]: Test Dataloader. + DataLoader: Test Dataloader. """ return DataLoader( self.test_dataset, shuffle=False, batch_size=self.config.test_batch_size, num_workers=self.config.num_workers, - collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, + collate_fn=collate_fn + if self.train_type != TrainType.Zeroshot + else lambda x: x, # type: ignore[return-value] ) - def predict_dataloader(self) -> Union[DataLoader, List[DataLoader]]: + def predict_dataloader(self) -> DataLoader: """Predict Dataloader. Returns: - Union[DataLoader, List[DataLoader]]: Predict Dataloader. + DataLoader: Predict Dataloader. 
""" return DataLoader( self.predict_dataset, shuffle=False, batch_size=1, num_workers=self.config.num_workers, - collate_fn=collate_fn if self.train_type != TrainType.Zeroshot else lambda x: x, + collate_fn=collate_fn + if self.train_type != TrainType.Zeroshot + else lambda x: x, # type: ignore[return-value] ) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py index d56df51fa5f..6944754c660 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/sam_image_encoder.py @@ -19,8 +19,9 @@ class SAMImageEncoder(nn.Module): Args: config (DictConfig): Config for image encoder. """ - + def __new__(cls, config: DictConfig): + """Initialize SAM image encoder to the target backbone.""" if "tiny_vit" == config.backbone: return build_tiny_vit(config.image_size) elif "vit" in config.backbone: diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index 9ac772ce4ad..f50afe06c8f 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -6,11 +6,10 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. -# -import re + from collections import OrderedDict -from typing import Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple import torch from omegaconf import DictConfig @@ -139,6 +138,7 @@ def load_checkpoint(self, state_dict: Optional[OrderedDict] = None) -> None: Args: state_dict (Optional[OrderedDict], optional): State dict of SAM. Defaults to None. """ + def skip_unused_parameters(state_dict): if self.config.model.backbone == "tiny_vit": for key in [ @@ -149,7 +149,7 @@ def skip_unused_parameters(state_dict): ]: if key in state_dict: state_dict.pop(key) - + if state_dict: # state_dict from args.load_from skip_unused_parameters(state_dict) @@ -326,7 +326,7 @@ def select_masks(self, masks: Tensor, iou_preds: Tensor, num_points: int) -> Tup iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) return masks, iou_preds - + @classmethod def postprocess_masks(cls, masks: Tensor, input_size: int, orig_size: Tensor) -> Tensor: """Postprocess the predicted masks. 
@@ -342,7 +342,7 @@ def postprocess_masks(cls, masks: Tensor, input_size: int, orig_size: Tensor) -> """ masks = F.interpolate(masks, size=(input_size, input_size), mode="bilinear", align_corners=False) - prepadded_size = cls.get_prepadded_size(cls, orig_size, input_size) + prepadded_size = cls.get_prepadded_size(cls, orig_size, input_size) # type: ignore[arg-type] masks = masks[..., : prepadded_size[0], : prepadded_size[1]] orig_size = orig_size.to(torch.int64) @@ -391,13 +391,15 @@ def forward_train( for idx_prompt, prompt in enumerate([bboxes[idx], points[idx]]): if prompt is None: continue - + sparse_embeddings, dense_embeddings = self.prompt_encoder( - points=(prompt.unsqueeze(1), torch.ones(len(prompt), 1, device=prompt.device)) if idx_prompt == 1 else None, + points=(prompt.unsqueeze(1), torch.ones(len(prompt), 1, device=prompt.device)) + if idx_prompt == 1 + else None, boxes=prompt if idx_prompt == 0 else None, masks=None, ) - + _low_res_masks, _iou_predictions = self.mask_decoder( image_embeddings=embedding.unsqueeze(0), image_pe=self.prompt_encoder.get_dense_pe(), @@ -439,9 +441,7 @@ def training_step(self, batch, batch_idx) -> Tensor: num_masks = sum(len(pred_mask) for pred_mask in pred_masks) for i, (pred_mask, gt_mask, iou_prediction) in enumerate(zip(pred_masks, gt_masks, iou_predictions)): - pred_mask = self.postprocess_masks( - pred_mask, self.config.model.image_size, batch["original_size"][i] - ) + pred_mask = self.postprocess_masks(pred_mask, self.config.model.image_size, batch["original_size"][i]) pred_mask = pred_mask.sigmoid().squeeze(1) self.train_metrics["train_IoU"].update(pred_mask, gt_mask) self.train_metrics["train_F1"].update(pred_mask, gt_mask) @@ -497,9 +497,7 @@ def validation_step(self, batch, batch_idx) -> MetricCollection: pred_masks, _ = self.forward_train(images, bboxes, points) for i, (pred_mask, gt_mask) in enumerate(zip(pred_masks, gt_masks)): - pred_mask = self.postprocess_masks( - pred_mask, self.config.model.image_size, batch["original_size"][i] - ) + pred_mask = self.postprocess_masks(pred_mask, self.config.model.image_size, batch["original_size"][i]) pred_mask = pred_mask.sigmoid().squeeze(1) for k, v in self.val_metrics.items(): v.update(pred_mask, gt_mask) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index ebdd7ae3783..a7c3181e6b6 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -3,28 +3,28 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from collections import OrderedDict, defaultdict -import os import json +import os +import pickle +from collections import OrderedDict, defaultdict from copy import deepcopy +from datetime import datetime from itertools import product -import pickle from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union -import torch -import numpy as np import cv2 +import numpy as np +import torch from omegaconf import DictConfig -from torch import nn, Tensor +from torch import Tensor, nn from torch.nn import Parameter, ParameterDict from torch.nn import functional as F +from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset import get_transform from 
otx.api.entities.scored_label import ScoredLabel from otx.utils.logger import get_logger -from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset import get_transform from .segment_anything import SegmentAnything -from datetime import datetime logger = get_logger() @@ -74,7 +74,7 @@ def forward( num_bg_points=num_bg_points, device=device, ) - + pad_size = torch.as_tensor(points_scores.shape[0] - total_points_scores.shape[1]) pad_tot = torch.max(self.zero_tensor, pad_size) pad_cur = torch.max(self.zero_tensor, -pad_size) @@ -92,7 +92,7 @@ def get_prompt_candidates( image_embeddings: Tensor, reference_feat: Tensor, original_size: Tensor, - threshold: Union[Tensor, float] = 0., + threshold: Union[Tensor, float] = 0.0, num_bg_points: Union[Tensor, int] = 1, device: Union[torch.device, str] = torch.device("cpu"), ) -> Tuple[Tensor, Tensor]: @@ -120,7 +120,7 @@ def _point_selection( self, mask_sim: Tensor, original_size: Tensor, - threshold: Union[Tensor, float] = 0., + threshold: Union[Tensor, float] = 0.0, num_bg_points: Union[Tensor, int] = 1, ) -> Tuple[Tensor, Tensor]: """Select point used as point prompts.""" @@ -135,7 +135,7 @@ def _point_selection( point_coords = torch.where(mask_sim > threshold) fg_coords_scores = torch.stack(point_coords[::-1] + (mask_sim[point_coords],), dim=0).T - + # to handle empty tensor len_fg_coords_scores = len(fg_coords_scores) fg_coords_scores = F.pad(fg_coords_scores, (0, 0, 0, max(0, 1 - len_fg_coords_scores)), value=-1) @@ -170,11 +170,20 @@ def _point_selection( class ZeroShotSegmentAnything(SegmentAnything): """Zero-shot learning module using Segment Anything.""" - def __init__(self, config: Optional[DictConfig] = None, manual_config_update: Optional[Dict] = None, state_dict: Optional[OrderedDict] = None) -> None: + def __init__( + self, + config: Optional[DictConfig] = None, + manual_config_update: Optional[Dict] = None, + state_dict: Optional[OrderedDict] = None, + ) -> None: if config is None: config = self.set_default_config() - - if manual_config_update is not None and isinstance(manual_config_update, dict) and len(manual_config_update) > 0: + + if ( + manual_config_update is not None + and isinstance(manual_config_update, dict) + and len(manual_config_update) > 0 + ): for k, v in manual_config_update.items(): exec(f"config.{k} = {v}") @@ -196,17 +205,18 @@ def __init__(self, config: Optional[DictConfig] = None, manual_config_update: Op self.point_labels_box = torch.as_tensor([[2, 3]], dtype=torch.float32) self.has_mask_inputs = [torch.as_tensor([[0.0]]), torch.as_tensor([[1.0]])] - + self.transforms = get_transform( - image_size=config.model.image_size, - mean=config.dataset.normalize.mean, - std=config.dataset.normalize.std) - + image_size=config.model.image_size, mean=config.dataset.normalize.mean, std=config.dataset.normalize.std + ) + self.path_reference_info = "vpm_zsl_reference_infos/{}/reference_info.pt" - - def load_state_dict_pre_hook(self, state_dict: dict[str, Any], prefix: str = "", *args, **kwargs) -> None: + + def load_state_dict_pre_hook(self, state_dict: Dict[str, Any], prefix: str = "", *args, **kwargs) -> None: """Load reference info manually.""" - _reference_feats: Tensor = state_dict.get("reference_info.reference_feats", torch.as_tensor([], dtype=torch.float32)) + _reference_feats: Tensor = state_dict.get( + "reference_info.reference_feats", torch.as_tensor([], dtype=torch.float32) + ) _used_indices: Tensor = state_dict.get("reference_info.used_indices", torch.as_tensor([], dtype=torch.int64)) 
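# Both lookups fall back to empty tensors for checkpoints saved before any reference features were learned; re-registering them in the ParameterDict below gives `load_state_dict` matching parameters to copy the stored values into.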
self.reference_info = ParameterDict( { @@ -231,7 +241,7 @@ def set_default_config(self) -> DictConfig: "mask_threshold": 0.0, "return_single_mask": False, "use_stability_score": False, - "stability_score_offset": 1., + "stability_score_offset": 1.0, "return_extra_metrics": False, }, "dataset": { @@ -239,10 +249,10 @@ def set_default_config(self) -> DictConfig: "mean": [123.675, 116.28, 103.53], "std": [58.395, 57.12, 57.375], } - } + }, } ) - + def set_empty_reference_info(self) -> None: """Set empty reference information.""" reference_feats: Parameter = Parameter(torch.as_tensor([], dtype=torch.float32), requires_grad=False) @@ -254,13 +264,13 @@ def set_empty_reference_info(self) -> None: }, ) self.is_reference_info_empty = True - + def initialize_reference_info(self) -> None: """Initialize reference information.""" self.reference_info["reference_feats"] = Parameter(torch.zeros(0, 1, 256), requires_grad=False) self.reference_info["used_indices"] = Parameter(torch.as_tensor([[]], dtype=torch.int64), requires_grad=False) self.is_reference_info_empty = False - + def expand_reference_info(self, new_largest_label: int) -> None: """Expand reference info dimensions if newly given processed prompts have more lables.""" if new_largest_label > (cur_largest_label := len(self.reference_info["reference_feats"]) - 1): @@ -282,17 +292,17 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ reset_feat (bool): Whether reset reference_info. For OTX standalone, resetting reference_info will be conducted in on_train_start. For other frameworks, setting it to True is required to reset reference_info. Defaults to False. - + Returns: (Tuple[ParameterDict, Tensor]): reference_info and ref_masks. """ if reset_feat: self.initialize_reference_info() - + # preprocess images and prompts transformed_batch = [self.transforms(b.copy()) for b in batch] processed_prompts = [self._preprocess_prompts(tb) for tb in transformed_batch] - + # initialize tensors to contain reference features and prompts largest_label = max([label for pp in processed_prompts for label in pp.keys()]) self.expand_reference_info(largest_label) @@ -301,12 +311,12 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ batch_ref_masks: List[Tensor] = [] for tb, pp in zip(transformed_batch, processed_prompts): # assign components - images = tb["images"].unsqueeze(0).to(self.device) + images = tb["images"].unsqueeze(0).to(self.device) # type: ignore[union-attr] original_size = torch.as_tensor(tb["original_size"]) - + image_embeddings = self.image_encoder(images) processed_embedding = image_embeddings.squeeze().permute(1, 2, 0) - + ref_masks = torch.zeros(largest_label + 1, *map(int, original_size)) for label, input_prompts in pp.items(): # TODO (sungchul): how to skip background class @@ -325,10 +335,10 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ mask_from_polygon = cv2.drawContours(mask_from_polygon, np.asarray([contour]), 0, 1, -1) ref_mask[mask_from_polygon == 1] += 1 elif (prompt := input_prompt.get("scribble_annotation", None)) is not None: - logger.warning(f"scribble_annotation is not supported yet.") + logger.warning("scribble_annotation is not supported yet.") continue elif (prompt := input_prompt.get("scribble_polygon", None)) is not None: - logger.warning(f"scribble_polygon is not supported yet.") + logger.warning("scribble_polygon is not supported yet.") continue else: point_coords = [] @@ -355,18 +365,23 @@ def learn(self, batch: 
List[Dict[str, Any]], reset_feat: bool = False) -> Union[ default_threshold_reference = deepcopy(self.prompt_getter.default_threshold_reference) while ref_feat is None: logger.info(f"[*] default_threshold_reference : {default_threshold_reference:.4f}") - ref_feat = self._generate_masked_features(processed_embedding, ref_mask, default_threshold_reference) + ref_feat = self._generate_masked_features( + processed_embedding, ref_mask, default_threshold_reference + ) default_threshold_reference -= 0.05 self.reference_info["reference_feats"][label] = ref_feat.detach().cpu() - self.reference_info["used_indices"] = Parameter(torch.cat((self.reference_info["used_indices"], torch.as_tensor([[label]])), dim=1), requires_grad=False) + self.reference_info["used_indices"] = Parameter( + torch.cat((self.reference_info["used_indices"], torch.as_tensor([[label]])), dim=1), + requires_grad=False, + ) ref_masks[label] = ref_mask.detach().cpu() batch_ref_masks.append(ref_masks) return self.reference_info, batch_ref_masks @torch.no_grad() def infer( - self, + self, batch: List[Dict[str, Any]], reference_feats: Union[np.ndarray, Tensor], used_indices: Union[np.ndarray, Tensor], @@ -378,8 +393,10 @@ def infer( Args: batch (List[Dict[str, Any]]): List of dictionaries containing images and metas. - reference_feats (Union[np.ndarray, Tensor]): Reference features for target prediction. If it is np.ndarray, it will be converted to torch tensor. - used_indices (Union[np.ndarray, Tensor]): To check which indices of reference features are validate. If it is np.ndarray, it will be converted to torch tensor. + reference_feats (Union[np.ndarray, Tensor]): Reference features for target prediction. + If it is np.ndarray, it will be converted to torch tensor. + used_indices (Union[np.ndarray, Tensor]): To check which indices of reference features are valid. + If it is np.ndarray, it will be converted to torch tensor. is_cascade (bool): Whether use cascade inference. Defaults to False.
Returns: @@ -395,19 +412,20 @@ def infer( # preprocess images and prompts transformed_batch = [self.transforms(b.copy()) for b in batch] - + total_results: List[List[Tensor]] = [] for tb in transformed_batch: # assign components - images = tb["images"].unsqueeze(0).to(self.device) + images = tb["images"].unsqueeze(0).to(self.device) # type: ignore[union-attr] original_size = torch.as_tensor(tb["original_size"]) - + image_embeddings = self.image_encoder(images) total_points_scores, total_bg_coords = self.prompt_getter( image_embeddings=image_embeddings, reference_feats=reference_feats, used_indices=used_indices, - original_size=original_size) + original_size=original_size, + ) predicted_masks: defaultdict = defaultdict(list) used_points: defaultdict = defaultdict(list) for label, (points_scores, bg_coords) in enumerate(zip(total_points_scores, total_bg_coords)): @@ -491,6 +509,7 @@ def _predict_masks( is_cascade: bool = True, ) -> Tensor: """Predict target masks.""" + masks: Tensor logits: Tensor scores: Tensor for i in range(3): @@ -517,9 +536,12 @@ def _predict_masks( coords = torch.nonzero(masks) y, x = coords[:, 0], coords[:, 1] box_coords = self._preprocess_coords( - torch.as_tensor([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device), + torch.as_tensor( + [[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device + ), original_size, - self.config.model.image_size) + self.config.model.image_size, + ) point_coords = torch.cat((point_coords, box_coords), dim=1) point_labels = torch.cat((point_labels, self.point_labels_box.to(self.device)), dim=1) @@ -544,7 +566,7 @@ def predict_step(self, batch, batch_idx): results = self.infer(batch, self.reference_info["reference_feats"], self.reference_info["used_indices"]) return [result[0] for result in results] # tmp: only mask - def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[int, List[Dict[str, Tensor]]]: + def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[Any, Any]: """Preprocess prompts. Currently, preprocessing for bounding boxes is only supported. @@ -553,7 +575,7 @@ def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[int, List[Dict[str, batch (Dict[str, Any]): Dictionary containing data and prompts information. Returns: - (defaultdict[int, List[Dict[str, Tensor]]]): Processed and arranged each single prompt + (Dict[Any, Any]): Processed and arranged each single prompt using label information as keys. Unlike other prompts, `annotation` prompts will be aggregated as single annotation. 
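Example (layout sketch only; label keys and tensor values depend on the given prompts):
{<label>: [{"bboxes": {"point_coords": Tensor(1, 2, 2), "point_labels": Tensor(1, 1)}}, {"points": {"point_coords": Tensor(1, 2), "point_labels": Tensor(1, 1)}}]}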
""" @@ -578,25 +600,31 @@ def _preprocess_prompts(self, batch: Dict[str, Any]) -> Dict[int, List[Dict[str, masks.append(mask_from_polygon) processed_prompts[label].append({prompt_name: torch.tensor(prompt, device=self.device)}) elif prompt_name == "bboxes": - processed_prompts[label].append({ - prompt_name: { - "point_coords": torch.as_tensor(prompt.reshape(-1, 2, 2), device=self.device), - "point_labels": torch.tensor([[1]], device=self.device), - }}) + processed_prompts[label].append( + { + prompt_name: { + "point_coords": torch.as_tensor(prompt.reshape(-1, 2, 2), device=self.device), + "point_labels": torch.tensor([[1]], device=self.device), + } + } + ) elif prompt_name == "points": - processed_prompts[label].append({ - prompt_name: { - "point_coords": torch.as_tensor(prompt.reshape(-1, 2), device=self.device), - "point_labels": torch.tensor([[1]], device=self.device), - }}) + processed_prompts[label].append( + { + prompt_name: { + "point_coords": torch.as_tensor(prompt.reshape(-1, 2), device=self.device), + "point_labels": torch.tensor([[1]], device=self.device), + } + } + ) - processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x)) + processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x)) # type: ignore[assignment] return processed_prompts - + def _preprocess_coords( self, coords: Tensor, - ori_shape: Union[list[int], tuple[int, int], Tensor], + ori_shape: Union[List[int], Tuple[int, int], Tensor], target_length: int, ) -> Tensor: """Expects a torch tensor of length 2 in the final dimension. @@ -605,7 +633,7 @@ def _preprocess_coords( Args: coords (Tensor): Coordinates tensor. - ori_shape (Union[list[int], tuple[int, int], Tensor]): Original size of image. + ori_shape (Union[List[int], Tuple[int, int], Tensor]): Original size of image. target_length (int): The length of the longest side of the image. 
Returns: @@ -700,17 +728,18 @@ def set_metrics(self) -> None: def configure_optimizers(self) -> None: """Skip configure_optimizers unused in zero-shot learning.""" pass - + def _find_latest_reference_info(self) -> Union[str, None]: """Find latest reference info to be used.""" if len(stamps := sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)) > 0: return stamps[0] self.initialize_reference_info() + return None def on_train_start(self) -> None: """Called at the beginning of training after sanity check.""" self.initialize_reference_info() - + def on_predict_start(self) -> None: """Called at the beginning of predicting.""" if (latest_stamp := self._find_latest_reference_info()) is not None: @@ -721,11 +750,19 @@ def on_predict_start(self) -> None: def training_epoch_end(self, outputs) -> None: """Called in the training loop at the very end of the epoch.""" - self.reference_info["used_indices"] = Parameter(self.reference_info["used_indices"].unique().unsqueeze(0), requires_grad=False) + self.reference_info["used_indices"] = Parameter( + self.reference_info["used_indices"].unique().unsqueeze(0), requires_grad=False + ) if self.config.model.save_outputs: path_reference_info = self.path_reference_info.format(datetime.now().strftime("%Y%m%d-%H%M%S")) os.makedirs(os.path.dirname(path_reference_info), exist_ok=True) torch.save(self.reference_info, path_reference_info) - pickle.dump({k: v.numpy() for k, v in self.reference_info.items()}, open(path_reference_info.replace(".pt", ".pickle"), "wb")) - json.dump(repr(self.trainer.datamodule.train_dataset.dataset), open(path_reference_info.replace("reference_info.pt", "reference_meta.json"), "w")) + pickle.dump( + {k: v.numpy() for k, v in self.reference_info.items()}, + open(path_reference_info.replace(".pt", ".pickle"), "wb"), + ) + json.dump( + repr(self.trainer.datamodule.train_dataset.dataset), + open(path_reference_info.replace("reference_info.pt", "reference_meta.json"), "w"), + ) logger.info(f"Saved reference info at {path_reference_info}") diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index f9c705a2489..a9b6105bb12 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -179,7 +179,7 @@ def get_model(config: DictConfig, train_type: TrainType, state_dict: Optional[Or SegmentAnything as VisualPrompter, ) elif train_type == TrainType.Zeroshot: - from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models import ( + from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models import ( # type: ignore[assignment] # noqa: E501 ZeroShotSegmentAnything as VisualPrompter, ) @@ -305,7 +305,7 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), "has_mask_input": torch.tensor([[1]], dtype=torch.float32), - "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.int64) + "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.int64), } output_names = ["upscaled_masks", "iou_predictions", "low_res_masks"] model_to_export = self.model @@ -671,7 +671,7 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), "has_mask_input": torch.tensor([[1]], 
dtype=torch.float32), - "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.int64) + "orig_size": torch.randint(low=256, high=2048, size=(1, 2), dtype=torch.int64), } output_names = ["upscaled_masks", "iou_predictions", "low_res_masks"] model_to_export = self.model diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 1a293313ced..9db584f6af2 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -15,9 +15,9 @@ # and limitations under the License. import io -import pickle import json import os +import pickle import random import tempfile import time @@ -150,13 +150,17 @@ def __init__( self.transform = get_transform() # TODO (sungchul): insert args def pre_process( - self, dataset_item: DatasetItemEntity, extra_processing: bool = False, use_bbox: bool = False, use_point: bool = False, + self, + dataset_item: DatasetItemEntity, + extra_processing: bool = False, + use_bbox: bool = False, + use_point: bool = False, ) -> Tuple[Dict[str, Any], Dict[str, Any], List[Dict[str, Any]]]: """Pre-process function of OpenVINO Visual Prompting Inferencer for image encoder.""" if use_bbox and use_point: logger.warning("If both use_bbox and use_point are set, bboxes and points will be generated randomly.") - - prob = 1. if not use_point else 0. if not use_bbox and use_point else 0.5 + + prob = 1.0 if not use_point else 0.0 if not use_bbox and use_point else 0.5 images, meta = self.model["image_encoder"].preprocess(dataset_item.numpy, extra_processing) prompts = OTXVisualPromptingDataset.get_prompts(dataset_item, self.labels, prob=prob) prompts = self.model["decoder"].preprocess(prompts, meta) @@ -289,12 +293,13 @@ def __init__( self.point_labels_box = np.array([[2, 3]], dtype=np.float32) self.has_mask_inputs = [np.array([[0.0]]), np.array([[1.0]])] - + self.reference_feats = None self.used_indices = None - self.path_reference_info = "vpm_zsl_reference_infos/{}/reference_info.pickle" - def pre_process_image_encoder(self, inputs: np.ndarray, extra_processing: bool = False) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: + def pre_process_image_encoder( + self, inputs: np.ndarray, extra_processing: bool = False + ) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for image encoder.""" return self.model["image_encoder"].preprocess(inputs, extra_processing) @@ -315,11 +320,13 @@ def pre_process_prompt_getter( } inputs_prompt_getter.update(image_embeddings) return inputs_prompt_getter - + def learn(self, images: np.ndarray): - """Learn""" - - def infer(self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray) -> List: + """Learn.""" + + def infer( + self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray + ) -> Tuple[List[Any], DefaultDict[Any, Any], DefaultDict[Any, Any]]: """Perform a prediction for a given input image.""" # forward image encoder images, meta = self.pre_process_image_encoder(images) @@ -327,7 +334,9 @@ def infer(self, images: np.ndarray, reference_feats: np.ndarray, used_indices: n image_embeddings = self.forward_image_encoder(images) # get point candidates - inputs_prompt_getter = self.pre_process_prompt_getter(image_embeddings, reference_feats, used_indices, original_size) + inputs_prompt_getter = self.pre_process_prompt_getter( + image_embeddings, reference_feats, used_indices, original_size + ) 
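# The prompt getter returns per-label point candidates ("total_points_scores") and background points ("total_bg_coords"); each candidate below is sent through the decoder to produce a mask per label.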
total_prompts = self.forward_prompt_getter(inputs_prompt_getter) annotations: DefaultDict = defaultdict(list) @@ -355,7 +364,8 @@ def infer(self, images: np.ndarray, reference_feats: np.ndarray, used_indices: n inputs_decoder = { "point_coords": point_coords[None], "point_labels": point_labels[None], - "orig_size": original_size[None]} + "orig_size": original_size[None], + } inputs_decoder.update(image_embeddings) prediction = self.forward_decoder(inputs_decoder, original_size) @@ -373,12 +383,6 @@ def infer(self, images: np.ndarray, reference_feats: np.ndarray, used_indices: n self._inspect_overlapping_areas(predicted_masks, used_points, annotations) return sum(annotations.values(), []), predicted_masks, used_points - def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore - """Perform a prediction for a given input image.""" - self._get_reference_info() - results = self.infer(dataset_item.numpy, self.reference_feats, self.used_indices) - return results[0] - def forward_prompt_getter(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" return self.model["prompt_getter"].infer_sync(inputs) @@ -387,6 +391,7 @@ def forward_decoder( # type: ignore self, inputs: Dict[str, np.ndarray], original_size: np.ndarray ) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" + masks: np.ndarray logits: np.ndarray scores: np.ndarray for i in range(3): @@ -399,7 +404,7 @@ def forward_decoder( # type: ignore elif i == 1: # Cascaded Post-refinement-1 - mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) + mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) # noqa: F821 if masks.sum() == 0: return {"upscaled_masks": masks} @@ -407,7 +412,7 @@ def forward_decoder( # type: ignore elif i == 2: # Cascaded Post-refinement-2 - mask_input, masks = self._postprocess_masks(masks, logits, scores) + mask_input, masks = self._postprocess_masks(masks, logits, scores) # noqa: F821 if masks.sum() == 0: return {"upscaled_masks": masks} @@ -416,14 +421,20 @@ def forward_decoder( # type: ignore box_coords = self.model["decoder"]._apply_coords( np.array([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=np.float32), original_size ) - inputs.update({ - "point_coords": np.concatenate((inputs["point_coords"], box_coords), axis=1), - "point_labels": np.concatenate((inputs["point_labels"], self.point_labels_box), axis=1), - }) + inputs.update( + { + "point_coords": np.concatenate((inputs["point_coords"], box_coords), axis=1), + "point_labels": np.concatenate((inputs["point_labels"], self.point_labels_box), axis=1), + } + ) inputs.update({"mask_input": mask_input, "has_mask_input": has_mask_input}) prediction = self.model["decoder"].infer_sync(inputs) - upscaled_masks, scores, logits = prediction["upscaled_masks"], prediction["iou_predictions"], prediction["low_res_masks"] + upscaled_masks, scores, logits = ( + prediction["upscaled_masks"], + prediction["iou_predictions"], + prediction["low_res_masks"], + ) masks = upscaled_masks > self.model["decoder"].mask_threshold _, masks = self._postprocess_masks(masks, logits, scores) @@ -492,21 +503,30 @@ def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): other_masks.pop(jm) used_points[other_label].pop(jm) annotations[other_label].pop(jm) - + + def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore + """Perform a prediction for a given input 
image.""" + if self.reference_feats is None and self.used_indices is None: + self.reference_feats, self.used_indices = self._get_reference_info() + results = self.infer(dataset_item.numpy, self.reference_feats, self.used_indices) + return results[0] + def _find_latest_reference_info(self) -> Union[str, None]: """Find latest reference info to be used.""" if len(stamps := sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)) > 0: return stamps[0] return None - - def _get_reference_info(self) -> None: + + def _get_reference_info( + self, path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle" + ) -> Union[Tuple[np.ndarray, np.ndarray], None]: """Get reference info through loading previously saved one or running `learn`.""" - if self.reference_feats is None and self.used_indices is None and (latest_stamp := self._find_latest_reference_info()) is not None: + if (latest_stamp := self._find_latest_reference_info()) is not None: # load previously saved reference info - latest_reference_info = self.path_reference_info.format(latest_stamp) + latest_reference_info = path_reference_info.format(latest_stamp) reference_info = pickle.load(open(latest_reference_info, "rb")) - self.reference_feats = reference_info["reference_feats"] - self.used_indices = reference_info["used_indices"] + return reference_info["reference_feats"], reference_info["used_indices"] + return None, None class OTXOpenVinoDataLoader: @@ -519,6 +539,7 @@ def __init__( module_name: str, shuffle: bool = True, output_model: Optional[ModelEntity] = None, + **kwargs, ): self.dataset = dataset self.inferencer = inferencer @@ -577,6 +598,8 @@ def __init__( module_name: str, shuffle: bool = True, output_model: Optional[ModelEntity] = None, + reference_feats: Optional[np.ndarray] = None, + used_indices: Optional[np.ndarray] = None, ): super().__init__( dataset=dataset, inferencer=inferencer, module_name=module_name, shuffle=shuffle, output_model=output_model @@ -584,6 +607,10 @@ def __init__( if self.module_name == "decoder": self.prompt_getter = self._load_module("prompt_getter", output_model) + self.inferencer: OpenVINOZeroShotVisualPromptingInferencer + self.reference_feats = reference_feats + self.used_indices = used_indices + def __getitem__(self, index: int) -> Dict[str, Any]: """Get item from dataset.""" images: Dict[str, np.ndarray] @@ -592,7 +619,7 @@ def __getitem__(self, index: int) -> Dict[str, Any]: index = self.shuffler[index] items = self.dataset[index] - images, meta = self.inferencer.pre_process(items, extra_processing=True) # type: ignore + images, meta = self.inferencer.pre_process_image_encoder(items.numpy, extra_processing=True) # type: ignore original_size = np.asarray(meta["original_shape"][:2]) _, _, h, w = images["images"].shape pad_width = ((0, 0), (0, 0), (0, self.target_length - h), (0, self.target_length - w)) @@ -601,7 +628,9 @@ def __getitem__(self, index: int) -> Dict[str, Any]: return images else: image_embeddings = self.image_encoder(images["images"]) - inputs_prompt_getter = self.inferencer.pre_process_prompt_getter(image_embeddings, original_size) + inputs_prompt_getter = self.inferencer.pre_process_prompt_getter( + image_embeddings, self.reference_feats, self.used_indices, original_size + ) if self.module_name == "prompt_getter": return inputs_prompt_getter @@ -803,6 +832,7 @@ def optimize( optimization_parameters: Optional[OptimizationParameters] = None, module_names: List[str] = ["image_encoder", "decoder"], ov_dataloader: Type[OTXOpenVinoDataLoader] = OTXOpenVinoDataLoader, + 
**kwargs, ): """Optimize function of OpenVINOVisualPromptingTask.""" logger.info("Start PTQ optimization") @@ -815,7 +845,9 @@ def optimize( dataset = dataset.get_subset(Subset.TRAINING) for i, module_name in enumerate(module_names, 1): - data_loader = ov_dataloader(dataset, self.inferencer, module_name=module_name, output_model=output_model) + data_loader = ov_dataloader( + dataset, self.inferencer, module_name=module_name, output_model=output_model, **kwargs + ) quantization_dataset = nncf.Dataset(data_loader, lambda data: data) with tempfile.TemporaryDirectory() as tempdir: @@ -903,8 +935,11 @@ def optimize( optimization_parameters: Optional[OptimizationParameters] = None, module_names: List[str] = ["image_encoder", "prompt_getter", "decoder"], ov_dataloader: Type[OTXOpenVinoDataLoader] = OTXZeroShotOpenVinoDataLoader, + **kwargs, ): """Optimize function of OpenVINOZeroShotVisualPromptingTask.""" + self.inferencer: OpenVINOZeroShotVisualPromptingInferencer + reference_feats, used_indices = self.inferencer._get_reference_info() return super().optimize( optimization_type=optimization_type, dataset=dataset, @@ -912,4 +947,6 @@ def optimize( optimization_parameters=optimization_parameters, module_names=module_names, ov_dataloader=ov_dataloader, + reference_feats=reference_feats, + used_indices=used_indices, ) diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index 1518dbd6131..d000c818c9f 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -56,13 +56,13 @@ def test_parameters(self): assert params.get("sim_threshold").default_value == 0.5 assert params.get("num_bg_points").default_value == 1 - + @e2e_pytest_unit def test_get_inputs(self, mocker): """Test _get_inputs.""" mocker.patch.object(ImageModel, "__init__") prompt_getter = PromptGetter("adapter") - + prompt_getter.inputs = { "image_embeddings": np.ones((1, 4, 4, 3)), "reference_feats": np.ones((2, 1, 256)), @@ -102,33 +102,30 @@ def test_get_outputs(self): assert "upscaled_masks" == results @e2e_pytest_unit - @pytest.mark.parametrize("prompts,expected", - [ - ( - { - "bboxes": [np.array([[1, 1], [2, 2]])], - "points": [], - "labels": {"bboxes": [1]}, - "original_size": (4, 4) - }, - { - "point_coords": (1, 2, 2), - "point_labels": (1, 2), - } - ), - ( - { - "bboxes": [], - "points": [np.array([[1, 1]])], - "labels": {"points": [1]}, - "original_size": (4, 4) - }, - { - "point_coords": (1, 1, 2), - "point_labels": (1, 1), - } - ) - ]) + @pytest.mark.parametrize( + "prompts,expected", + [ + ( + { + "bboxes": [np.array([[1, 1], [2, 2]])], + "points": [], + "labels": {"bboxes": [1]}, + "original_size": (4, 4), + }, + { + "point_coords": (1, 2, 2), + "point_labels": (1, 2), + }, + ), + ( + {"bboxes": [], "points": [np.array([[1, 1]])], "labels": {"points": [1]}, "original_size": (4, 4)}, + { + "point_coords": (1, 1, 2), + "point_labels": (1, 1), + }, + ), + ], + ) def test_preprocess(self, prompts: Dict[str, Any], expected: Dict[str, Any]): """Test preprocess""" results = self.decoder.preprocess(prompts, {}) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py 
index 0bac7032fa1..8053f3dcc6c 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py @@ -77,9 +77,15 @@ def test_on_predict_epoch_end(self, use_mask: bool, expected: Any): { "masks": [torch.Tensor([[[0, 1, 0], [1, 1, 1], [0, 1, 0]]])], "iou_predictions": [torch.Tensor([[0.9]])], - "labels": [{ - "bboxes": [ScoredLabel(label=LabelEntity("foreground", domain=Domain.VISUAL_PROMPTING), probability=0.0)], - }], + "labels": [ + { + "bboxes": [ + ScoredLabel( + label=LabelEntity("foreground", domain=Domain.VISUAL_PROMPTING), probability=0.0 + ) + ], + } + ], } ] ] diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py index c9e32b6f28e..80890c5a155 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py @@ -21,76 +21,79 @@ @e2e_pytest_unit -@pytest.mark.parametrize("batch,expected",[ - ( - [ - { - "index": 0, - "images": Tensor([1, 2, 3]), - "bboxes": Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), - "points": torch.zeros((0, 2)), - "gt_masks": [Tensor([1, 2, 3])], - "original_size": Tensor([1, 3]), - "path": [], - "labels": [], - }, - { - "index": 1, - "images": Tensor([4, 5, 6]), - "bboxes": Tensor([[9, 10, 11, 12]]), - "points": torch.zeros((0, 2)), - "gt_masks": [Tensor([4, 5, 6])], - "original_size": Tensor([1, 3]), - "path": [], - "labels": [], - }, - ], - { - "index": [0, 1], - "images": Tensor([[1, 2, 3], [4, 5, 6]]), - "bboxes": [Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), Tensor([[9, 10, 11, 12]])], - "points": [None, None], - "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], - "original_size": [Tensor([1, 3]), Tensor([1, 3])], - "path": [[], []], - "labels": [[], []], - } - ), - ( - [ +@pytest.mark.parametrize( + "batch,expected", + [ + ( + [ + { + "index": 0, + "images": Tensor([1, 2, 3]), + "bboxes": Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), + "points": torch.zeros((0, 2)), + "gt_masks": [Tensor([1, 2, 3])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + { + "index": 1, + "images": Tensor([4, 5, 6]), + "bboxes": Tensor([[9, 10, 11, 12]]), + "points": torch.zeros((0, 2)), + "gt_masks": [Tensor([4, 5, 6])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + ], { - "index": 0, - "images": Tensor([1, 2, 3]), - "bboxes": torch.zeros((0, 4)), - "points": Tensor([[1, 1]]), - "gt_masks": [Tensor([1, 2, 3])], - "original_size": Tensor([1, 3]), - "path": [], - "labels": [], + "index": [0, 1], + "images": Tensor([[1, 2, 3], [4, 5, 6]]), + "bboxes": [Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), Tensor([[9, 10, 11, 12]])], + "points": [None, None], + "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], + "original_size": [Tensor([1, 3]), Tensor([1, 3])], + "path": [[], []], + "labels": [[], []], }, + ), + ( + [ + { + "index": 0, + "images": Tensor([1, 2, 3]), + "bboxes": torch.zeros((0, 4)), + "points": Tensor([[1, 1]]), + "gt_masks": [Tensor([1, 2, 3])], + "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + { + "index": 1, + "images": Tensor([4, 5, 6]), + "bboxes": torch.zeros((0, 4)), + "points": Tensor([[2, 2]]), + "gt_masks": [Tensor([4, 5, 6])], 
+ "original_size": Tensor([1, 3]), + "path": [], + "labels": [], + }, + ], { - "index": 1, - "images": Tensor([4, 5, 6]), - "bboxes": torch.zeros((0, 4)), - "points": Tensor([[2, 2]]), - "gt_masks": [Tensor([4, 5, 6])], - "original_size": Tensor([1, 3]), - "path": [], - "labels": [], + "index": [0, 1], + "images": Tensor([[1, 2, 3], [4, 5, 6]]), + "bboxes": [None, None], + "points": [Tensor([[1, 1]]), Tensor([[2, 2]])], + "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], + "original_size": [Tensor([1, 3]), Tensor([1, 3])], + "path": [[], []], + "labels": [[], []], }, - ], - { - "index": [0, 1], - "images": Tensor([[1, 2, 3], [4, 5, 6]]), - "bboxes": [None, None], - "points": [Tensor([[1, 1]]), Tensor([[2, 2]])], - "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], - "original_size": [Tensor([1, 3]), Tensor([1, 3])], - "path": [[], []], - "labels": [[], []], - } - ) -]) + ), + ], +) def test_collate_fn(batch: List[Dict[str, Any]], expected: Dict[str, Any]): """Test collate_fn.""" results = collate_fn(batch) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py index 5c2c652ed2e..66ae6958f0b 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_image_encoder.py @@ -25,9 +25,10 @@ class TestSAMImageEncoder: @pytest.fixture() def config(self, mocker) -> DictConfig: return DictConfig(dict(image_size=1024)) - + @e2e_pytest_unit - @pytest.mark.parametrize("backbone,expected", + @pytest.mark.parametrize( + "backbone,expected", [ ("tiny_vit", "TinyViT"), ("vit_b", "ViT"), diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index 24ae52aa3cb..8f3f727e533 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -490,9 +490,7 @@ def test_predict_step(self, mocker, return_logits: bool, expected: Tensor) -> No (6, torch.tensor((8, 8)), (1, 8, 8)), ], ) - def test_postprocess_masks( - self, input_size: int, original_size: Tuple[int], expected: Tuple[int] - ) -> None: + def test_postprocess_masks(self, input_size: int, original_size: Tuple[int], expected: Tuple[int]) -> None: """Test postprocess_masks.""" sam = SegmentAnything(config=self.base_config) masks = torch.zeros((1, 1, 4, 4)) @@ -500,7 +498,7 @@ def test_postprocess_masks( results = sam.postprocess_masks(masks, input_size, original_size) assert results.shape[1:] == expected - + @e2e_pytest_unit @pytest.mark.parametrize( "input_image_size,expected", diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index e48c8b88e03..047dcb4f8bf 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ 
b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -45,33 +45,43 @@ def test_set_default_thresholds(self) -> None: assert self.prompt_getter.default_threshold_target == 0.7 @e2e_pytest_unit - @pytest.mark.parametrize("result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])]) + @pytest.mark.parametrize( + "result_point_selection", + [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])], + ) def test_forward(self, mocker, result_point_selection: torch.Tensor) -> None: """Test forward.""" mocker.patch.object( - self.prompt_getter, - "get_prompt_candidates", - return_value=(result_point_selection, torch.zeros(1, 2))) + self.prompt_getter, "get_prompt_candidates", return_value=(result_point_selection, torch.zeros(1, 2)) + ) image_embeddings = torch.ones(1, 4, 4, 4) reference_feats = torch.rand(1, 1, 4) used_indices = torch.as_tensor([[0]]) original_size = torch.tensor((self.prompt_getter.image_size, self.prompt_getter.image_size), dtype=torch.int64) total_points_scores, total_bg_coords = self.prompt_getter( - image_embeddings=image_embeddings, reference_feats=reference_feats, used_indices=used_indices, original_size=original_size + image_embeddings=image_embeddings, + reference_feats=reference_feats, + used_indices=used_indices, + original_size=original_size, ) - + assert total_points_scores.shape[0] == 1 assert total_bg_coords.shape[0] == 1 @e2e_pytest_unit - @pytest.mark.parametrize("result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])]) + @pytest.mark.parametrize( + "result_point_selection", + [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])], + ) def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tensor) -> None: """Test get_prompt_candidates.""" mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" ) - mocker.patch.object(self.prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2))) + mocker.patch.object( + self.prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2)) + ) image_embeddings = torch.ones(1, 4, 4, 4) reference_feat = torch.rand(1, 4) original_size = torch.tensor( @@ -89,9 +99,13 @@ def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tenso @pytest.mark.parametrize( "mask_sim,expected", [ - (torch.arange(0.1, 1.0, 0.1).reshape(3, 3), torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]])), - (torch.zeros(3, 3), torch.tensor([[-1, -1, -1]])) - ]) + ( + torch.arange(0.1, 1.0, 0.1).reshape(3, 3), + torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), + ), + (torch.zeros(3, 3), torch.tensor([[-1, -1, -1]])), + ], + ) def test_point_selection(self, mask_sim: torch.Tensor, expected: torch.Tensor) -> None: """Test _point_selection.""" points_scores, bg_coords = self.prompt_getter._point_selection( @@ -106,7 +120,9 @@ def test_point_selection(self, mask_sim: torch.Tensor, expected: torch.Tensor) - class TestZeroShotSegmentAnything: @pytest.fixture def set_zero_shot_segment_anything(self, monkeypatch): - def zero_shot_segment_anything(manual_config_update: Optional[Dict] = None, state_dict: Optional[OrderedDict] = None): + def 
zero_shot_segment_anything( + manual_config_update: Optional[Dict] = None, state_dict: Optional[OrderedDict] = None + ): monkeypatch.setattr( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SAMImageEncoder", MockImageEncoder, @@ -133,7 +149,7 @@ def test_init(self, set_zero_shot_segment_anything, state_dict: Optional[Dict[st state_dict = set_zero_shot_segment_anything().state_dict() state_dict.pop("reference_info.reference_feats") state_dict.pop("reference_info.used_indices") - + zero_shot_segment_anything = set_zero_shot_segment_anything(state_dict=state_dict) assert zero_shot_segment_anything.config.model.freeze_image_encoder @@ -177,14 +193,16 @@ def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: ) mocker.patch.object(zero_shot_segment_anything, "_generate_masked_features", return_value=torch.ones(1, 256)) - batch = [{ - "images": np.ones((4, 4, 3), dtype=np.uint8), - "gt_masks": np.ones((4, 4), dtype=np.uint8), - "bboxes": np.array([[0, 0, 1, 1]], dtype=np.float32), - "points": np.zeros((0, 2), dtype=np.float32), - "labels": {"bboxes": [MockScoredLabel(label=0, name="label")]}, - "original_size": np.array([4, 4], dtype=np.int64) - }] + batch = [ + { + "images": np.ones((4, 4, 3), dtype=np.uint8), + "gt_masks": np.ones((4, 4), dtype=np.uint8), + "bboxes": np.array([[0, 0, 1, 1]], dtype=np.float32), + "points": np.zeros((0, 2), dtype=np.float32), + "labels": {"bboxes": [MockScoredLabel(label=0, name="label")]}, + "original_size": np.array([4, 4], dtype=np.int64), + } + ] zero_shot_segment_anything.learn(batch=batch, reset_feat=True) assert zero_shot_segment_anything.reference_info.reference_feats.shape == (1, 1, 256) @@ -203,14 +221,18 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect reference_feats = nn.Parameter(torch.rand(1, 1, 256), requires_grad=False) used_indices = nn.Parameter(torch.as_tensor([[0]], dtype=torch.int64), requires_grad=False) mocker.patch.object( - SegmentAnything, "forward", return_value=(torch.ones(1, 4, 4, 4), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) + SegmentAnything, + "forward", + return_value=(torch.ones(1, 4, 4, 4), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)), ) - batch = [{ - "images": np.ones((4, 4, 3), dtype=np.uint8), - "gt_masks": np.ones((4, 4), dtype=np.uint8), - "original_size": np.array([4, 4], dtype=np.int64) - }] + batch = [ + { + "images": np.ones((4, 4, 3), dtype=np.uint8), + "gt_masks": np.ones((4, 4), dtype=np.uint8), + "original_size": np.array([4, 4], dtype=np.int64), + } + ] total_results = zero_shot_segment_anything.infer( batch=batch, reference_feats=reference_feats, @@ -220,11 +242,13 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect for i, results in enumerate(total_results[0]): for _, result in results.items(): assert torch.equal(result[0], expected[i]) - + @e2e_pytest_unit def test_inspect_overlapping_areas(self, mocker, set_zero_shot_segment_anything) -> None: """Test _inspect_overlapping_areas.""" - mocker.patch("otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_checkpoint") + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_checkpoint" + ) zero_shot_segment_anything = set_zero_shot_segment_anything() predicted_masks = { 0: [ @@ -327,7 +351,9 @@ def test_inspect_overlapping_areas(self, mocker, 
set_zero_shot_segment_anything) def test_predict_masks(self, mocker, set_zero_shot_segment_anything) -> None: """Test _predict_masks.""" mocker.patch.object( - SegmentAnything, "forward", return_value=(torch.ones(1, 4, 8, 8), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) + SegmentAnything, + "forward", + return_value=(torch.ones(1, 4, 8, 8), torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)), ) zero_shot_segment_anything = set_zero_shot_segment_anything() @@ -348,10 +374,10 @@ def test_preprocess_prompts(self, set_zero_shot_segment_anything) -> None: transformed_batch = { "bboxes": torch.tensor([[0, 0, 1, 1]]), "points": torch.tensor([[2, 2]]), - "labels": {"bboxes": [MockScoredLabel(label=1)], "points": [MockScoredLabel(label=1)]} + "labels": {"bboxes": [MockScoredLabel(label=1)], "points": [MockScoredLabel(label=1)]}, } processed_prompts = zero_shot_segment_anything._preprocess_prompts(transformed_batch) - + for prompts in processed_prompts.values(): for prompt in prompts: if "bboxes" in prompt: diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index 59c5fa37215..d595227fa7c 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -381,7 +381,11 @@ def test_save_model(self, mocker): mocker_otx_model = mocker.patch("otx.api.entities.model.ModelEntity") mocker_io_bytes_io = mocker.patch("io.BytesIO") mocker_torch_save = mocker.patch("torch.save") - mocker.patch.object(self.zero_shot_task.model, "state_dict", return_value={"reference_info.reference_feats": None, "reference_info.used_indices": None}) + mocker.patch.object( + self.zero_shot_task.model, + "state_dict", + return_value={"reference_info.reference_feats": None, "reference_info.used_indices": None}, + ) self.zero_shot_task.model.reference_info = "reference_info" diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 9196204dbaa..cdfdab44739 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -223,7 +223,9 @@ def test_predict(self, mocker): "forward_prompt_getter", return_value={"total_points_scores": np.array([[[1, 1, 1]]]), "total_bg_coords": np.array([[[2, 2]]])}, ) - mocker_forward_decoder = mocker.patch.object(OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value={}) + mocker_forward_decoder = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value={} + ) mocker_post_process = mocker.patch.object( OpenVINOZeroShotVisualPromptingInferencer, "post_process", return_value=(self.fake_annotation, None, None) ) @@ -243,12 +245,20 @@ def test_predict(self, mocker): [ ( (np.ones((1, 1)), np.ones((3, 3))), - {"upscaled_masks": np.ones((3, 3)), "iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + { + "upscaled_masks": np.ones((3, 3)), + "iou_predictions": np.array([[0.9]]), + "low_res_masks": np.ones((1, 1, 2, 2)), + }, {"upscaled_masks": np.ones((3, 3))}, ), ( (np.zeros((2, 2)), np.zeros((3, 3))), - {"upscaled_masks": np.zeros((3, 3)), "iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + { + "upscaled_masks": np.zeros((3, 3)), + "iou_predictions": np.array([[0.9]]), + "low_res_masks": np.ones((1, 1, 2, 2)), + }, {"upscaled_masks": 
np.zeros((3, 3))}, ), ], @@ -269,7 +279,9 @@ def test_forward_decoder( "_apply_coords", return_value=np.array([[[1, 1]]], dtype=np.float32), ) - mocker.patch.object(self.zero_shot_visual_prompting_ov_inferencer, "_postprocess_masks", return_value=postprocess_output) + mocker.patch.object( + self.zero_shot_visual_prompting_ov_inferencer, "_postprocess_masks", return_value=postprocess_output + ) result = self.zero_shot_visual_prompting_ov_inferencer.forward_decoder( inputs={ @@ -288,7 +300,7 @@ def test_forward_decoder( [ ( np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[None], 4, axis=0)[None], - np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_) + np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_), ), ( np.concatenate( @@ -298,7 +310,7 @@ def test_forward_decoder( ), axis=1, ), - np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_) + np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_), ), (np.zeros((1, 4, 3, 3)), np.zeros((3, 3))), ], @@ -309,11 +321,12 @@ def test_postprocess_masks(self, masks: np.ndarray, expected_masks: np.ndarray): self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].image_size = 3 _, result_masks = self.zero_shot_visual_prompting_ov_inferencer._postprocess_masks( - masks=masks, logits=np.empty((1, 4, 2, 2)), scores=np.array([[0.5, 0.7, 0.8, 0.9]])) + masks=masks, logits=np.empty((1, 4, 2, 2)), scores=np.array([[0.5, 0.7, 0.8, 0.9]]) + ) assert result_masks.shape == (3, 3) assert np.all(result_masks == expected_masks) - + @e2e_pytest_unit def test_inspect_overlapping_areas(self) -> None: """Test _inspect_overlapping_areas.""" @@ -407,7 +420,9 @@ def test_inspect_overlapping_areas(self) -> None: ], } - self.zero_shot_visual_prompting_ov_inferencer._inspect_overlapping_areas(predicted_masks, used_points, predicted_masks.copy(), threshold_iou=0.5) + self.zero_shot_visual_prompting_ov_inferencer._inspect_overlapping_areas( + predicted_masks, used_points, predicted_masks.copy(), threshold_iou=0.5 + ) assert len(predicted_masks[0]) == 1 assert len(predicted_masks[1]) == 2 From 4eb93129c2ab2d27a4be30d50c83ef20add1ac96 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Mon, 19 Feb 2024 17:22:54 +0900 Subject: [PATCH 13/28] Fix e2e --- .../model_wrappers/openvino_models.py | 18 +- .../visual_prompters/segment_anything.py | 2 + .../zero_shot_segment_anything.py | 81 ++--- .../visual_prompting/tasks/inference.py | 15 +- .../visual_prompting/tasks/openvino.py | 125 ++++--- .../annotations/instances_train.json | 66 ++++ .../annotations/instances_val.json | 316 ++++++++++++++++++ .../images/train/Slide4.PNG | Bin 0 -> 26804 bytes .../images/val/Slide3.PNG | Bin 0 -> 21103 bytes .../images/val/Slide4.PNG | Bin 0 -> 6123 bytes .../images/val/Slide5.PNG | Bin 0 -> 31177 bytes .../images/val/Slide6.PNG | Bin 0 -> 21277 bytes .../images/val/Slide7.PNG | Bin 0 -> 32317 bytes .../images/val/Slide8.PNG | Bin 0 -> 22874 bytes .../images/val/Slide9.PNG | Bin 0 -> 26796 bytes .../compressed_prompt_getter.yml | 2 +- .../cli/visual_prompting/test_zero_shot.py | 10 +- 17 files changed, 509 insertions(+), 126 deletions(-) create mode 100644 tests/assets/car_tree_bug_zero_shot/annotations/instances_train.json create mode 100644 tests/assets/car_tree_bug_zero_shot/annotations/instances_val.json create mode 100644 tests/assets/car_tree_bug_zero_shot/images/train/Slide4.PNG create mode 100644 tests/assets/car_tree_bug_zero_shot/images/val/Slide3.PNG create mode 100644 tests/assets/car_tree_bug_zero_shot/images/val/Slide4.PNG create mode 100644 
tests/assets/car_tree_bug_zero_shot/images/val/Slide5.PNG create mode 100644 tests/assets/car_tree_bug_zero_shot/images/val/Slide6.PNG create mode 100644 tests/assets/car_tree_bug_zero_shot/images/val/Slide7.PNG create mode 100644 tests/assets/car_tree_bug_zero_shot/images/val/Slide8.PNG create mode 100644 tests/assets/car_tree_bug_zero_shot/images/val/Slide9.PNG diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 816e7db118a..7c0e8da2b8e 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -172,23 +172,13 @@ def postprocess(self, outputs: Dict[str, np.ndarray], meta: Dict[str, Any]) -> T Returns: hard_prediction (np.ndarray): The hard prediction. - soft_prediction (np.ndarray): Resized, cropped, and normalized soft prediction. + soft_prediction (np.ndarray): The soft prediction. """ + probability = max(min(float(outputs["scores"]), 1.0), 0.0) + hard_prediction = outputs[self.output_blob_name].squeeze() > self.mask_threshold + soft_prediction = hard_prediction * probability - def sigmoid(x): - return np.tanh(x * 0.5) * 0.5 + 0.5 # to avoid overflow - - soft_prediction = outputs[self.output_blob_name].squeeze() - soft_prediction = sigmoid(soft_prediction) meta["soft_prediction"] = soft_prediction - - hard_prediction = create_hard_prediction_from_soft_prediction( - soft_prediction=soft_prediction, - soft_threshold=self.soft_threshold, - blur_strength=self.blur_strength, - ) - - probability = max(min(float(outputs["scores"]), 1.0), 0.0) meta["label"].probability = probability return hard_prediction, soft_prediction diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index f50afe06c8f..14535b04cb2 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -351,6 +351,8 @@ def postprocess_masks(cls, masks: Tensor, input_size: int, orig_size: Tensor) -> def get_prepadded_size(self, input_image_size: Tensor, longest_side: int) -> Tensor: """Get pre-padded size.""" + input_image_size = input_image_size.to(torch.float32) + longest_side = torch.tensor(longest_side).to(torch.float32) scale = longest_side / torch.max(input_image_size) transformed_size = scale * input_image_size return torch.floor(transformed_size + 0.5).to(torch.int64) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index a7c3181e6b6..aa3206eec89 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -40,69 +40,59 @@ def __init__(self, image_size: int, downsizing: int = 64) -> None: self.image_size = image_size self.downsizing = downsizing - self.zero_tensor = torch.as_tensor(0) - def 
set_default_thresholds(self, default_threshold_reference: float, default_threshold_target: float) -> None: """Set default thresholds.""" self.default_threshold_reference = default_threshold_reference self.default_threshold_target = default_threshold_target - def forward( + def get_prompt_candidates( self, image_embeddings: Tensor, reference_feats: Tensor, used_indices: Tensor, original_size: Tensor, - threshold: Tensor = torch.as_tensor([[0.0]], dtype=torch.float32), - num_bg_points: Tensor = torch.as_tensor([[1]], dtype=torch.int64), - ) -> Tuple[Tensor, Tensor]: + threshold: Tensor = torch.tensor([[0.0]], dtype=torch.float32), + num_bg_points: Tensor = torch.tensor([[1]], dtype=torch.int64), + device: Union[torch.device, str] = torch.device("cpu"), + ) -> Tuple[Dict[int, Tensor], Dict[int, Tensor]]: """Get prompt candidates.""" - device = image_embeddings.device - original_size = original_size.squeeze() - threshold = threshold.squeeze().to(device) - num_bg_points = num_bg_points.squeeze() + threshold = threshold.to(device) - total_points_scores: Tensor = torch.zeros(used_indices.max() + 1, 0, 3, device=device) - total_bg_coords: Tensor = torch.zeros(used_indices.max() + 1, num_bg_points, 2, device=device) - for i in range(len(used_indices[0])): - label = used_indices[0][i] - points_scores, bg_coords = self.get_prompt_candidates( + total_points_scores: Dict[int, Tensor] = {} + total_bg_coords: Dict[int, Tensor] = {} + for label in map(int, used_indices[0]): + points_scores, bg_coords = self( image_embeddings=image_embeddings, reference_feat=reference_feats[label], original_size=original_size, threshold=threshold, num_bg_points=num_bg_points, - device=device, ) - pad_size = torch.as_tensor(points_scores.shape[0] - total_points_scores.shape[1]) - pad_tot = torch.max(self.zero_tensor, pad_size) - pad_cur = torch.max(self.zero_tensor, -pad_size) - - total_points_scores = F.pad(total_points_scores, (0, 0, 0, pad_tot, 0, 0), value=-1) - points_scores = F.pad(points_scores, (0, 0, 0, pad_cur), value=-1) - total_points_scores[label] = points_scores total_bg_coords[label] = bg_coords return total_points_scores, total_bg_coords - def get_prompt_candidates( + def forward( self, image_embeddings: Tensor, reference_feat: Tensor, original_size: Tensor, - threshold: Union[Tensor, float] = 0.0, - num_bg_points: Union[Tensor, int] = 1, - device: Union[torch.device, str] = torch.device("cpu"), + threshold: Tensor = torch.tensor([[0.0]], dtype=torch.float32), + num_bg_points: Tensor = torch.tensor([[1]], dtype=torch.int64), ) -> Tuple[Tensor, Tensor]: """Get prompt candidates from given reference and target features.""" + original_size = original_size.squeeze() + threshold = threshold.squeeze() + num_bg_points = num_bg_points.squeeze() + target_feat = image_embeddings.squeeze() c_feat, h_feat, w_feat = target_feat.shape target_feat = target_feat / target_feat.norm(dim=0, keepdim=True) target_feat = target_feat.reshape(c_feat, h_feat * w_feat) - sim = reference_feat.to(device) @ target_feat + sim = reference_feat @ target_feat sim = sim.reshape(1, 1, h_feat, w_feat) sim = ZeroShotSegmentAnything.postprocess_masks(sim, self.image_size, original_size) @@ -138,7 +128,7 @@ def _point_selection( # to handle empty tensor len_fg_coords_scores = len(fg_coords_scores) - fg_coords_scores = F.pad(fg_coords_scores, (0, 0, 0, max(0, 1 - len_fg_coords_scores)), value=-1) + fg_coords_scores = F.pad(fg_coords_scores, (0, 0, 0, max(0, 10 - len_fg_coords_scores)), value=-1) ratio = self.image_size / original_size.max() width 
= (original_size[1] * ratio).to(torch.int64) @@ -159,10 +149,12 @@ def _point_selection( matched_grid = fg_coords_scores.unsqueeze(1) * matched_matrix.unsqueeze(-1) # sample the highest score one of the samples that are in the same grid - points_scores = matched_grid[matched_grid[..., -1].topk(k=1, dim=0, largest=True)[1][0]].diagonal().T + matched_indices = matched_grid[..., -1].topk(k=1, dim=0, largest=True)[1][0].to(torch.int64) + points_scores = matched_grid[matched_indices].diagonal().T # sort by the highest score - points_scores = points_scores[torch.argsort(points_scores[:, -1], descending=True)] + sorted_points_scores_indices = torch.argsort(points_scores[:, -1], descending=True).to(torch.int64) + points_scores = points_scores[sorted_points_scores_indices] return points_scores, bg_coords @@ -203,8 +195,8 @@ def __init__( default_threshold_target=config.model.default_threshold_target, ) - self.point_labels_box = torch.as_tensor([[2, 3]], dtype=torch.float32) - self.has_mask_inputs = [torch.as_tensor([[0.0]]), torch.as_tensor([[1.0]])] + self.point_labels_box = torch.tensor([[2, 3]], dtype=torch.float32) + self.has_mask_inputs = [torch.tensor([[0.0]]), torch.tensor([[1.0]])] self.transforms = get_transform( image_size=config.model.image_size, mean=config.dataset.normalize.mean, std=config.dataset.normalize.std @@ -215,9 +207,9 @@ def __init__( def load_state_dict_pre_hook(self, state_dict: Dict[str, Any], prefix: str = "", *args, **kwargs) -> None: """Load reference info manually.""" _reference_feats: Tensor = state_dict.get( - "reference_info.reference_feats", torch.as_tensor([], dtype=torch.float32) + "reference_info.reference_feats", torch.tensor([], dtype=torch.float32) ) - _used_indices: Tensor = state_dict.get("reference_info.used_indices", torch.as_tensor([], dtype=torch.int64)) + _used_indices: Tensor = state_dict.get("reference_info.used_indices", torch.tensor([], dtype=torch.int64)) self.reference_info = ParameterDict( { "reference_feats": Parameter(_reference_feats, requires_grad=False), @@ -255,8 +247,8 @@ def set_default_config(self) -> DictConfig: def set_empty_reference_info(self) -> None: """Set empty reference information.""" - reference_feats: Parameter = Parameter(torch.as_tensor([], dtype=torch.float32), requires_grad=False) - used_indices: Parameter = Parameter(torch.as_tensor([[]], dtype=torch.int64), requires_grad=False) + reference_feats: Parameter = Parameter(torch.tensor([], dtype=torch.float32), requires_grad=False) + used_indices: Parameter = Parameter(torch.tensor([[]], dtype=torch.int64), requires_grad=False) self.reference_info = ParameterDict( { "reference_feats": reference_feats, @@ -268,7 +260,7 @@ def set_empty_reference_info(self) -> None: def initialize_reference_info(self) -> None: """Initialize reference information.""" self.reference_info["reference_feats"] = Parameter(torch.zeros(0, 1, 256), requires_grad=False) - self.reference_info["used_indices"] = Parameter(torch.as_tensor([[]], dtype=torch.int64), requires_grad=False) + self.reference_info["used_indices"] = Parameter(torch.tensor([[]], dtype=torch.int64), requires_grad=False) self.is_reference_info_empty = False def expand_reference_info(self, new_largest_label: int) -> None: @@ -372,7 +364,7 @@ def learn(self, batch: List[Dict[str, Any]], reset_feat: bool = False) -> Union[ self.reference_info["reference_feats"][label] = ref_feat.detach().cpu() self.reference_info["used_indices"] = Parameter( - torch.cat((self.reference_info["used_indices"], torch.as_tensor([[label]])), dim=1), + 
torch.cat((self.reference_info["used_indices"], torch.tensor([[label]])), dim=1), requires_grad=False, ) ref_masks[label] = ref_mask.detach().cpu() @@ -420,18 +412,19 @@ def infer( original_size = torch.as_tensor(tb["original_size"]) image_embeddings = self.image_encoder(images) - total_points_scores, total_bg_coords = self.prompt_getter( + total_points_scores, total_bg_coords = self.prompt_getter.get_prompt_candidates( image_embeddings=image_embeddings, reference_feats=reference_feats, used_indices=used_indices, original_size=original_size, + device=self.device, ) predicted_masks: defaultdict = defaultdict(list) used_points: defaultdict = defaultdict(list) - for label, (points_scores, bg_coords) in enumerate(zip(total_points_scores, total_bg_coords)): + for label in total_points_scores.keys(): + points_scores = total_points_scores[label] + bg_coords = total_bg_coords[label] for points_score in points_scores: - if points_score[-1] == -1: - continue x, y = points_score[:2] is_done = False for pm in predicted_masks.get(label, []): @@ -444,7 +437,7 @@ def infer( point_coords = torch.cat((points_score[:2].unsqueeze(0), bg_coords), dim=0).unsqueeze(0) point_coords = self._preprocess_coords(point_coords, original_size, self.config.model.image_size) - point_labels = torch.as_tensor( + point_labels = torch.tensor( [1] + [0] * len(bg_coords), dtype=torch.float32, device=self.device ).unsqueeze(0) mask = self._predict_masks( diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index a9b6105bb12..86a45131b52 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -641,20 +641,19 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): model_to_export = self.model.image_encoder elif module == "visual_prompting_prompt_getter": + reference_feat = torch.randn(1, 256, dtype=torch.float32) + reference_feat /= reference_feat.norm(dim=-1, keepdim=True) dummy_inputs = { "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float32), - "reference_feats": torch.randn(2, 1, 256, dtype=torch.float32), - "used_indices": torch.tensor([[0, 1]], dtype=torch.int64), + "reference_feat": reference_feat, "original_size": torch.randint(low=0, high=image_size * 2, size=(1, 2), dtype=torch.int64), - "threshold": torch.tensor([[0.1]], dtype=torch.float32), + "threshold": torch.tensor([[0.0]], dtype=torch.float32), "num_bg_points": torch.randint(low=1, high=image_size, size=(1, 1), dtype=torch.int64), } - output_names = ["total_points_scores", "total_bg_coords"] + output_names = ["points_scores", "bg_coords"] dynamic_axes = { - "reference_feats": {0: "num_labels"}, - "used_indices": {1: "num_labels"}, - "total_points_scores": {0: "num_labels", 1: "num_points"}, - "total_bg_coords": {0: "num_labels", 1: "num_points"}, + "points_scores": {0: "num_points"}, + "bg_coords": {0: "num_points"}, } model_to_export = self.model.prompt_getter diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 9db584f6af2..b7aaa85bc51 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -303,24 +303,6 @@ def pre_process_image_encoder( """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for image encoder.""" return self.model["image_encoder"].preprocess(inputs, extra_processing) - def pre_process_prompt_getter( - self, 
- image_embeddings: Dict[str, np.ndarray], - reference_feats: np.ndarray, - used_indices: np.ndarray, - original_size: np.ndarray, - ) -> Dict[str, np.ndarray]: - """Pre-process function of OpenVINO Zero-shot VIsual Prompting Inferencer for prompt getter.""" - inputs_prompt_getter = { - "original_size": original_size[None], - "reference_feats": reference_feats, - "used_indices": used_indices, - "threshold": np.asarray([[self.model["prompt_getter"].sim_threshold]], dtype=np.float32), - "num_bg_points": np.asarray([[self.model["prompt_getter"].num_bg_points]], dtype=np.int64), - } - inputs_prompt_getter.update(image_embeddings) - return inputs_prompt_getter - def learn(self, images: np.ndarray): """Learn.""" @@ -330,23 +312,20 @@ def infer( """Perform a prediction for a given input image.""" # forward image encoder images, meta = self.pre_process_image_encoder(images) - original_size = np.asarray(meta["original_shape"][:2], dtype=np.int64) + original_size = np.asarray([meta["original_shape"][:2]], dtype=np.int64) image_embeddings = self.forward_image_encoder(images) # get point candidates - inputs_prompt_getter = self.pre_process_prompt_getter( - image_embeddings, reference_feats, used_indices, original_size - ) - total_prompts = self.forward_prompt_getter(inputs_prompt_getter) + total_points_scores, total_bg_coords = self.forward_prompt_getter(image_embeddings, reference_feats, used_indices, original_size) annotations: DefaultDict = defaultdict(list) predicted_masks: DefaultDict = defaultdict(list) used_points: DefaultDict = defaultdict(list) - for label, (points_scores, bg_coords) in enumerate( - zip(total_prompts["total_points_scores"], total_prompts["total_bg_coords"]) - ): + for label in total_points_scores.keys(): + points_scores = total_points_scores[label] + bg_coords = total_bg_coords[label] for points_score in points_scores: - if points_score[-1] == -1: + if points_score[-1] in [-1., 0.]: continue x, y = points_score[:2] is_done = False @@ -359,33 +338,66 @@ def infer( continue point_coords = np.concatenate((np.array([[x, y]]), bg_coords), axis=0, dtype=np.float32) - point_coords = self.model["decoder"]._apply_coords(point_coords, original_size) + point_coords = self.model["decoder"]._apply_coords(point_coords, original_size[0]) point_labels = np.array([1] + [0] * len(bg_coords), dtype=np.float32) inputs_decoder = { "point_coords": point_coords[None], "point_labels": point_labels[None], - "orig_size": original_size[None], + "orig_size": original_size, } inputs_decoder.update(image_embeddings) prediction = self.forward_decoder(inputs_decoder, original_size) prediction.update({"scores": points_score[-1]}) - metadata = { - "label": [_label for _label in self.labels if int(_label.id_) == label][0], - "original_size": original_size[None], - } - # set annotation for eval - annotation, hard_prediction, _ = self.post_process(prediction, metadata) - annotations[label].extend(annotation) - predicted_masks[label].append(hard_prediction) + predicted_masks[label].append(prediction[self.model["decoder"].output_blob_name]) used_points[label].append(points_score) - self._inspect_overlapping_areas(predicted_masks, used_points, annotations) + + self._inspect_overlapping_areas(predicted_masks, used_points) + + for label, predictions in predicted_masks.items(): + if len(predictions) == 0: + continue + metadata = { + "label": [_label for _label in self.labels if int(_label.id_) == label][0], + "original_size": original_size, + } + for prediction, used_point in zip(predictions, used_points[label]): + 
annotation, _, _ = self.post_process( + { + self.model["decoder"].output_blob_name: prediction, + "scores": used_point[-1] + }, + metadata) + annotations[label].extend(annotation) + return sum(annotations.values(), []), predicted_masks, used_points - def forward_prompt_getter(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + def forward_prompt_getter( + self, + image_embeddings: Dict[str, np.ndarray], + reference_feats: np.ndarray, + used_indices: np.ndarray, + original_size: np.ndarray, + ) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" - return self.model["prompt_getter"].infer_sync(inputs) + inputs = { + "original_size": original_size, + "threshold": np.array([[self.model["prompt_getter"].sim_threshold]], dtype=np.float32), + "num_bg_points": np.array([[self.model["prompt_getter"].num_bg_points]], dtype=np.int64), + **image_embeddings + } + total_points_scores: Dict[int, np.ndarray] = {} + total_bg_coords: Dict[int, np.ndarray] = {} + for label in used_indices[0]: + reference_feat = reference_feats[label] + inputs["reference_feat"] = reference_feat + outputs = self.model["prompt_getter"].infer_sync(inputs) + + total_points_scores[label] = outputs["points_scores"] + total_bg_coords[label] = outputs["bg_coords"] + + return total_points_scores, total_bg_coords def forward_decoder( # type: ignore self, inputs: Dict[str, np.ndarray], original_size: np.ndarray @@ -419,7 +431,7 @@ def forward_decoder( # type: ignore has_mask_input = self.has_mask_inputs[1] y, x = np.nonzero(masks) box_coords = self.model["decoder"]._apply_coords( - np.array([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=np.float32), original_size + np.array([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=np.float32), original_size[0] ) inputs.update( { @@ -467,7 +479,7 @@ def _inspect_overlapping_areas( self, predicted_masks: Dict[int, List[np.ndarray]], used_points: Dict[int, List[np.ndarray]], - annotations: Dict[int, List[np.ndarray]], + # annotations: Dict[int, List[np.ndarray]], threshold_iou: float = 0.8, ): def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): @@ -497,12 +509,12 @@ def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): for im in sorted(list(set(overlapped_label)), reverse=True): masks.pop(im) used_points[label].pop(im) - annotations[label].pop(im) + # annotations[label].pop(im) for jm in sorted(list(set(overlapped_other_label)), reverse=True): other_masks.pop(jm) used_points[other_label].pop(jm) - annotations[other_label].pop(jm) + # annotations[other_label].pop(jm) def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore """Perform a prediction for a given input image.""" @@ -620,7 +632,7 @@ def __getitem__(self, index: int) -> Dict[str, Any]: items = self.dataset[index] images, meta = self.inferencer.pre_process_image_encoder(items.numpy, extra_processing=True) # type: ignore - original_size = np.asarray(meta["original_shape"][:2]) + original_size = np.asarray([meta["original_shape"][:2]]) _, _, h, w = images["images"].shape pad_width = ((0, 0), (0, 0), (0, self.target_length - h), (0, self.target_length - w)) images["images"] = np.pad(images["images"], pad_width, mode="constant", constant_values=0) @@ -628,26 +640,31 @@ def __getitem__(self, index: int) -> Dict[str, Any]: return images else: image_embeddings = self.image_encoder(images["images"]) - inputs_prompt_getter = self.inferencer.pre_process_prompt_getter( - image_embeddings, self.reference_feats, self.used_indices, original_size - ) 
if self.module_name == "prompt_getter": - return inputs_prompt_getter + return { + "reference_feat": self.reference_feats[self.used_indices[0][0]], # only use the first feature + "original_size": original_size, + "threshold": np.array([[self.inferencer.model["prompt_getter"].sim_threshold]], dtype=np.float32), + "num_bg_points": np.array([[self.inferencer.model["prompt_getter"].num_bg_points]], dtype=np.int64), + **image_embeddings + } - total_prompts = self.prompt_getter(inputs_prompt_getter) + total_points_scores, total_bg_coords = self.inferencer.forward_prompt_getter( + image_embeddings, self.reference_feats, self.used_indices, original_size) + # only use the first prompt - point_score = total_prompts["total_points_scores"][0][0] - bg_coords = total_prompts["total_bg_coords"][0] + point_score = total_points_scores[0][0] + bg_coords = total_bg_coords[0] x, y = point_score[:2] point_coords = np.concatenate((np.array([[x, y]]), bg_coords), axis=0, dtype=np.float32) - point_coords = self.inferencer.model["decoder"]._apply_coords(point_coords, original_size) + point_coords = self.inferencer.model["decoder"]._apply_coords(point_coords, original_size[0]) point_labels = np.array([1] + [0] * len(bg_coords), dtype=np.float32) inputs_decoder = {"point_coords": point_coords[None], "point_labels": point_labels[None]} inputs_decoder.update(image_embeddings) inputs_decoder.update( { - "orig_size": original_size[None], + "orig_size": original_size, "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32), "has_mask_input": np.zeros((1, 1), dtype=np.float32), } diff --git a/tests/assets/car_tree_bug_zero_shot/annotations/instances_train.json b/tests/assets/car_tree_bug_zero_shot/annotations/instances_train.json new file mode 100644 index 00000000000..39dadb88943 --- /dev/null +++ b/tests/assets/car_tree_bug_zero_shot/annotations/instances_train.json @@ -0,0 +1,66 @@ +{ + "licenses": [{ "name": "", "id": 0, "url": "" }], + "info": { + "contributor": "", + "date_created": "", + "description": "", + "url": "", + "version": "", + "year": "" + }, + "categories": [ + { "id": 1, "name": "car", "supercategory": "" }, + { "id": 2, "name": "tree", "supercategory": "" }, + { "id": 3, "name": "bug", "supercategory": "" } + ], + "images": [ + { + "id": 6, + "width": 1280, + "height": 720, + "file_name": "Slide4.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + } + ], + "annotations": [ + { + "id": 16, + "image_id": 6, + "category_id": 3, + "segmentation": [ + [251.2, 150.5, 372.47, 47.31, 596.99, 231.4, 455.05, 376.77] + ], + "area": 53610.0, + "bbox": [251.2, 47.31, 345.79, 329.46], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 17, + "image_id": 6, + "category_id": 2, + "segmentation": [ + [641.72, 255.48, 731.18, 87.74, 848.17, 144.52, 746.67, 311.4] + ], + "area": 23927.0, + "bbox": [641.72, 87.74, 206.45, 223.66], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 18, + "image_id": 6, + "category_id": 1, + "segmentation": [ + [791.4, 443.9, 984.95, 183.23, 1112.26, 273.55, 910.97, 536.77] + ], + "area": 50412.0, + "bbox": [791.4, 183.23, 320.86, 353.54], + "iscrowd": 0, + "attributes": { "occluded": false } + } + ] +} diff --git a/tests/assets/car_tree_bug_zero_shot/annotations/instances_val.json b/tests/assets/car_tree_bug_zero_shot/annotations/instances_val.json new file mode 100644 index 00000000000..bdbfe9331bf --- /dev/null +++ b/tests/assets/car_tree_bug_zero_shot/annotations/instances_val.json @@ -0,0 +1,316 @@ +{ + 
"licenses": [{ "name": "", "id": 0, "url": "" }], + "info": { + "contributor": "", + "date_created": "", + "description": "", + "url": "", + "version": "", + "year": "" + }, + "categories": [ + { "id": 1, "name": "car", "supercategory": "" }, + { "id": 2, "name": "tree", "supercategory": "" }, + { "id": 3, "name": "bug", "supercategory": "" } + ], + "images": [ + { + "id": 7, + "width": 1280, + "height": 720, + "file_name": "Slide3.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + }, + { + "id": 8, + "width": 1280, + "height": 720, + "file_name": "Slide4.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + }, + { + "id": 1, + "width": 1280, + "height": 720, + "file_name": "Slide9.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + }, + { + "id": 2, + "width": 1280, + "height": 720, + "file_name": "Slide8.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + }, + { + "id": 3, + "width": 1280, + "height": 720, + "file_name": "Slide7.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + }, + { + "id": 4, + "width": 1280, + "height": 720, + "file_name": "Slide6.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + }, + { + "id": 5, + "width": 1280, + "height": 720, + "file_name": "Slide5.PNG", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + } + ], + "annotations": [ + { + "id": 19, + "image_id": 7, + "category_id": 1, + "segmentation": [ + [184.09, 131.61, 338.06, 129.89, 339.78, 457.63, 183.23, 461.08] + ], + "area": 51030.0, + "bbox": [183.23, 129.89, 156.55, 331.19], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 20, + "image_id": 7, + "category_id": 2, + "segmentation": [ + [832.69, 104.09, 1018.49, 102.37, 1017.63, 226.24, 825.81, 233.98] + ], + "area": 23933.0, + "bbox": [825.81, 102.37, 192.68, 131.61], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 21, + "image_id": 7, + "category_id": 3, + "segmentation": [ + [898.92, 490.32, 1195.7, 487.74, 1209.46, 673.55, 913.55, 670.11] + ], + "area": 54157.0, + "bbox": [898.92, 487.74, 310.54, 185.81], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 22, + "image_id": 8, + "category_id": 3, + "segmentation": [ + [341.51, 373.33, 502.4, 456.8, 341.5, 709.7, 188.39, 612.47] + ], + "area": 52814.0, + "bbox": [188.39, 373.33, 314.01, 336.37], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 1, + "image_id": 1, + "category_id": 3, + "segmentation": [ + [17.2, 166.88, 203.87, 7.74, 410.32, 43.87, 117.85, 331.18] + ], + "area": 58273.0, + "bbox": [17.2, 7.74, 393.12, 323.44], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 2, + "image_id": 1, + "category_id": 1, + "segmentation": [ + [294.19, 281.29, 643.44, 300.22, 628.82, 469.68, 277.85, 449.03] + ], + "area": 59331.0, + "bbox": [277.85, 281.29, 365.59, 188.39], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 3, + "image_id": 1, + "category_id": 2, + "segmentation": [ + [114.41, 499.79, 30.97, 670.11, 151.4, 705.38, 240.86, 536.77] + ], + "area": 24033.0, + "bbox": [30.97, 499.79, 209.89, 205.59], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 4, + "image_id": 2, + "category_id": 1, + "segmentation": [[165.16, 2.58, 344.95, 41.29, 27.5, 363.0, 9.46, 147.1]], + "area": 53173.0, + "bbox": [9.46, 2.58, 335.49, 360.42], + "iscrowd": 0, + 
"attributes": { "occluded": false } + }, + { + "id": 5, + "image_id": 2, + "category_id": 2, + "segmentation": [ + [524.73, 378.49, 648.6, 227.96, 762.15, 298.49, 627.96, 458.49] + ], + "area": 26526.0, + "bbox": [524.73, 227.96, 237.42, 230.53], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 6, + "image_id": 2, + "category_id": 3, + "segmentation": [ + [946.24, 652.9, 1191.4, 356.13, 1274.8, 576.3, 1092.5, 715.7] + ], + "area": 55317.0, + "bbox": [946.24, 356.13, 328.56, 359.57], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 7, + "image_id": 3, + "category_id": 2, + "segmentation": [ + [584.95, 221.94, 715.7, 223.66, 706.24, 411.18, 583.23, 413.76] + ], + "area": 24074.0, + "bbox": [583.23, 221.94, 132.47, 191.82], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 8, + "image_id": 3, + "category_id": 1, + "segmentation": [ + [826.67, 222.8, 966.9, 176.3, 1081.29, 489.46, 931.61, 542.8] + ], + "area": 51362.0, + "bbox": [826.67, 176.3, 254.62, 366.5], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 9, + "image_id": 3, + "category_id": 3, + "segmentation": [ + [698.49, 384.52, 864.52, 390.54, 872.26, 688.17, 683.01, 683.01] + ], + "area": 52982.0, + "bbox": [683.01, 384.52, 189.25, 303.65], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 10, + "image_id": 4, + "category_id": 1, + "segmentation": [ + [69.68, 11.18, 67.1, 336.34, 213.33, 338.92, 222.8, 10.32] + ], + "area": 48945.0, + "bbox": [67.1, 10.32, 155.7, 328.6], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 11, + "image_id": 4, + "category_id": 2, + "segmentation": [ + [569.46, 70.54, 688.17, 70.54, 683.01, 262.37, 559.14, 263.23] + ], + "area": 23273.0, + "bbox": [559.14, 70.54, 129.03, 192.69], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 12, + "image_id": 4, + "category_id": 3, + "segmentation": [ + [972.04, 116.13, 1265.38, 95.48, 1274.84, 295.05, 974.62, 292.47] + ], + "area": 55841.0, + "bbox": [972.04, 95.48, 302.8, 199.57], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 13, + "image_id": 5, + "category_id": 3, + "segmentation": [ + [200.43, 336.34, 385.38, 334.62, 382.8, 635.7, 206.45, 638.28] + ], + "area": 54478.0, + "bbox": [200.43, 334.62, 184.95, 303.66], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 14, + "image_id": 5, + "category_id": 2, + "segmentation": [ + [594.41, 523.01, 779.35, 523.87, 778.49, 643.44, 590.97, 645.16] + ], + "area": 22525.0, + "bbox": [590.97, 523.01, 188.38, 122.15], + "iscrowd": 0, + "attributes": { "occluded": false } + }, + { + "id": 15, + "image_id": 5, + "category_id": 1, + "segmentation": [ + [1101.1, 304.6, 1230.1, 389.6, 1058.1, 665.8, 929.03, 581.51] + ], + "area": 50271.0, + "bbox": [929.03, 304.6, 301.07, 361.2], + "iscrowd": 0, + "attributes": { "occluded": false } + } + ] +} diff --git a/tests/assets/car_tree_bug_zero_shot/images/train/Slide4.PNG b/tests/assets/car_tree_bug_zero_shot/images/train/Slide4.PNG new file mode 100644 index 0000000000000000000000000000000000000000..236d848e36a377a1791144c9bc0325801af42b16 GIT binary patch literal 26804 zcmeEu`9IWq8~2n`j%=M$La5V%3fXrmMG|985h~eZ?87iyZ-_LR_@9TZNukSM*ZFSat-1|@{6syM7%X%o( z9{8WTTmRk-|8;@?+AjF7U9NiSmr&WwMQ z@}FIGn6&#Sl#PbQ<%3%;Is2-u%99=j8W-jUPEMx->(rnrny?IK4ilDDKwVFz&ZK(LEr4 zgqH#3znf*vf&5Svjp-HJcsO34ga7=#AjzwHLTWVWF7tOSQ@(hvc!D>Tk=Qc+scoJ| zbr*e!eu5~5LJ3yCj51ptZO+7aZ*E@1rq(-Tn&q3UROZaQ|H!NUNzqoG#qX2n;O`rj 
z%*HG6aqF*=lq15&KJ`@D`_Z1ujSgE7m}g{8!V>+_@Fkn%e$9;q zg-X!zKVHl$hdXD`-m1%tGdRqCyU%jEjJDTAlqu(y;#}g5N-IvcoVUep7PRkTMY!RC zxm*EJpA-LEe65++aGc*!XLLi`I|oLUShHLCZ*(4;xMfmct&P@?GkuFBe0pz%IfjMq zLJc0=cYfr<=iqyUe6u|F>(ou?a1OePu?HbI-^J4-Qm*rdIq`j>y4; znYJ92pBbHFGWVXk^S_NoU7jiSxr^g;`1IyB-9UHk(Gib9xY*jdjsGsz=hCjg*ncvY zW3FW|aDGc?@bj6*rnekMmP8e*e#vw1m#;n(Z^`Ee7j>y6ZiC1jC>#Fo4wUvBak=n- zggKKtS-o?}uhjlKWI`%RwQokjDb0c)m$fkvH6S+}Im}x^tw65)RdQWYOaIjym_8om zmXQF=YKwYX16PZ-h5{!Wf8Ni8Wn&pUmhnVp~*+A&h;Et{@J!vzH@A_(BJl8ELZSn zg7+%FG!wUoPx1|xUyeDb>_y|{?vuhgF!2vhG*PeoZzL}GaIk$6S&2w5{j6? z3dfzN>Dj}Fzb{v*Cq&m&t-LYax&aLs#i>e8|8@iq=c^aZ>u)>#Wy8>m)TzW0CHu@sWgj_|27Q$Ep z2M5-l--Gp$Zu=T$qb04iX#IKA*Y*kQn6mA`I-=N1Ptr<7mEEu^HoL^OqcJd#Vq;(A}H;NHXe&2|P|S=WJ##ZNnKC)HqQv;pbP}z^O(SZctWU zF7Jp@tGn>1GIz1lgwx!#ALHGxXP}%vGg!*2ZWyQHIDu`G@c4-h?C-X$o?fLf6cLko5wFi}0feQJWIpJY__5q;AcA5IGR z%{@7lzC3VHV*LyA-bP1r`E++mn@qpT?2EV@HR@j%MD+cSt98+ROwxi&eHjNsIB0Ks z&Ub$?u5`wDmj7fXKMj5O^=7`USI*BF+;x$C5mFr8A!ob;xfF{nTGFp1?lsD)l)$N= zTq0q8;*e*Im!jha_!f@^;H_Fr%EVx0Zq$t8T2d2!I*W4Ts=sOwd$Z^2daL#ZndmtE zx^dPxjHz?IjfXiqR=>!e97?I&_2VL3(p9s=gGp73H7n1n?n*b^%oDEYZ(B-PIwmB& z)gUpdudwp=w%Zs_{M74zg0JBQI#M>C)JVldswbfQZ-XO*JW0CQhw;UEH$HxXaocL_ zB+4|V*w6MJ7?_=CnjL3)cHVyNgQla6u5_|z<8Nk(XuD-`xY&B@A}y^w8(XdK>w)90^1SNV`^Yvc1MEqdw^D#tSTR`5 z&KR&r^R5TRzCT%oe}7(F`$~&8_8E=MqdeZdAA`8}p2y>mm}8L#I4}wePwI2wiyxF> zJ4r(`W6H}n+*n3lG4NM)ySkJwJTKyEk~qzkoAy=B#{V`Pdc|$IncSMZJrlg*sromo zoT`_Clc|wf#RipJ1CHM$ypt@Bg>4enqLwPsn|_^o*Ce4V6r+1vxiDCPf#GYyZkG8x zn{O#TWo$QMs<33m?Vpr#%XTC~U{+{jEAV6UbAHQp$W5)*h;YS8+~C?8+|z%$D86{1 zi=xV~-w0VhGoi1iVv44noNzVgm0UD{8aravz4;8IBv=VUN)^X|GbvjYPJz>7hD3w| zq({t%@3Ur+m}_PmN)BW{ig!cZ)7Ta0Dd$4FDOf!dB%+CVEf*bn;zl49@p)uTJ9gx} zeQ|^Rcut?&c8QsO@NmVNoRZRMPB|u){jRVMZHR08HU`26PP$57*_xfAZ(Si4*Qc#qqC9VSGbZq7it-9781JnZSsY8!oa3k+6qzZWMf4{vB&n^sgT!P;~^E>o2|cO zz%_}SsAIm60F#;bwm7tJ(`m}@T!r{nb(|CGPpI=GRXFsm`+C-$wQ>jzUA&S;8}bgo z@$;&_KDs(rIeXBUy&_*#?PrJ=PD?o3^S%#_7xk1MGK&rIxt4I5p&M6ub{Tr?w((MI z=lO0wHUv}*%tvO(BoF&I6AfDesiQ3Ce->=^@Pn5&+58&5>!(p)Lm=e2FK!B$ETx@;WHx!@ovP3*w&n9Pc6IW}4aanx zM8(8&>*dRJUU%X8hcsE=uIst6XdKvgKkZfGb*qks*sDEFGqbmy^=VqDr(AIUb^u!- zV_#w5PjieYeeI#j=P=;Deil=45Fb8Wey?k6u8OC)il28pAXSFIS=9lC`YiOg##mNG zVRy&xkbCdW3gy@+K+Yq|>^f!zw~+HK(~Lh> zOO`TrS-MsdJ*D zho#^~YNC)p09WyyXu*$-X~nhn^GZCo%)b9)vl-m7#{DzuH@MDC5_V9h&e!$%x^RZ$ zTW4)RK)#t%AzhX$NFwg1S|G3=qSS%s&NkmVBTB|q$bK71spe$5W5IqY=1Q50tCVV@ zbJbN=>70QQ8Tr7kzbb~m=^TT!pP+lr_0);O5l>nONPgNt?H2%N>+em#Fb90zYPIZq zAr{Z1v~1FU=2BRb=cYlsE$v|9RelxCzGQKqdF`#S>(TiD}5H znc^`sH7s0ieko)~CV0@_pPilLb?GVaqe$l-OEw(S`7W}iEm++`C=ywZM}39tFE_aU zF4H8k`O}76XCQi9%-;A-^BIaecFU}&Z1{(v!#bH44R>;}i39T%1PTErl8VM7aTQv(K4VmZ;+`XncI`4>2WD*_~jy*zT$0BN(CPXhi6 z3q8)rEAQjf`8_DGWz|uz@?$vZjZp!wIAIZjz3KedC%YBzcx;3)x}F_6d$t-i{{f}i zeARvM5uey(HEW-5roPLX{!E~v@h@w~v@hG|mscH<_`K|}1=qcfn={5}JcU$8faKSI zx?mP^FQxJXRi&`uR>1Dq@+g1pQyhb=IoZ!68T@4d&qqCBQ+#mmw!D)?A(?hm<#-<< zLu>6(D8pEjN!k2h;tArl(UKjv?f>Jp>Q5#WCh9Cgsjl(HlAexrWwBHxG1I!@ACGre zc+~|HlXeVk*nGN2eL%8y<7V z<~(j1$u-^ZFfbw?UsSN>sL{XX`6^bVI+?%X!mhnfh2aX@Dn9ZsUpw3TrfEiU{Tc1d zFBB3KGL;v@%p<)98J4`8#wD(&eStLW(3vd$sS%L(r>bi=L=dxG=mZxO#oPH7Gi7-X zbd0fNV$LKgv6bg&et#_YM%Qp04mkHCN;SI|m;h>K2KTCgliTm&x;Q`u^{bh0Pr5A( zOO1KiPO3AW!-aqlvF+ubiUG>p0Z@H`*XM#$@bZ@VpEd>VBRYOL5O`9nTFXcw{< zVh_}rf^}SjFs^YHsd%O97#IIG9_swgk^42oEE z2Q2?88~>z(CoRQwX0-H$2#j2-;S@S&ex&N#$0$*sp(usqrRT-L**IyR1$LGC+=JaG zzoz(krF4&YC}HgM2?Ais(v>AE1=8S4*C2Q_!)bo%UDYYLxs#~8M!^DujVfK{e|U!` z>=z*@a1e?fM|vF1%?nKpP<3#W86mbN)V2f~35sM^;EC-#W_|zAkD8qv{#hCUkf68h zf!XT65ZWsMkJwb+6QT{wJrpfnUZWiOeSa-qrD`Pbu%*QFDf{0mUlC9tUuyXMaVY}M 
zYt|o-7jXq#gz>R-y^;j=T8mnf(j#+OWI|H%leAB4`B>$IN0S@m?X_W=}( z;4wqLXcjTfO^1>(_U*=2huabQ%}g`b8Z1ljLDH8&hjO-nFnK`eM6s{mYXqzUJ!IyZ zr`afM@ub|T)x*e1)cmGJ_bD5TqJFFI3fwFPy35|L$^kOmi`AhMmD#xWbrFVKLK|Nm z80Su?HYR^3<;1d6#`n~yBwN@# zC>E)bFCe$him#h3>w%SwchrP14kysDZbu|%l59Fr2>45Dn%_0p_HN>%Sc{&xBR+<{ z678pyuSkNajG9GHF0V;quE|IxG+0?n~= z9ShceM}?1~azCIL3g2y2yI#SMf0X%NaUB>t;0yIT%3VeSzY{#BptuMMD{!9LAc{%G zXB1)s|CPV*`?!ewa=84xKtW@cY3oq?a&#Y^k{gy>$Z#x@3(RlOF)_$EwPx?t4*L_dyzo z2U)x*=Z~;DF5GZ!z$kNj@IBwu+PS)YI0KJKnehPg*^tQhUrgOP^M}HHR&}pa+b&;1 z0O~?|oGNpZ4URR^7hAx*RDPT>tmPyFb-ERQY3j2LabXuy4(P=28TFY<&OM|(f4d^Y zRa})u#=mgS8Eo~gwf>t|Rz6TooL)5qc`C&8bq|gESnceT9(YUlys${z%~o&Y7>G7^ z)ePhOp#Eo0c*_NA6-$A&3URq%=c|L-Htt?(bNrDE#h#hsLi|kmvbsI-7*e1P;Hx{L zk}Pak?4^YX;Est;tt2ZSSVMW+V8szsJd3xFd9{*|P{whi7JV$iZoTie;=;1#_3hmw z&{#;*60A<#mm*U;GO}9vXrob>VkU-}_--SYhZ*i#sWmYdvv+l4VRfWF?C%#SnDtXH z4~dSNVL=@2No4=&!%tKURDQwY9m9>K>Yq{MQ~WH;Ki?*;NYa-UQEAM`3+}a`j0AkP z4)>o~H!m)^n;la4`<>E|)*sW~yV8PPp77Ft?C8rzH8dkGgH1j03%%uYcisGj@7$y8 zVUw@RF0BB=ZsRO6Kcbt$y4d`G>H9zI^}#gazcFy8a@PEn0yroO*EHLKX+|(}mS)$E z7laNafQ5Z(9Tlm}eSch3Nos+)QW$D@>3yb`VFGqZV- z*t_c`Gh%P}9Iww>+Z*kqJL3A_6?c0cJ(@ifeB;6M$kPft2)};^;kWu1QN1cjPr4GF z;LxWnDUX^H*8_JSwR8GzZYV%&gkxEXH9nI0qIIbDjg~#h-eseb(L=p~@q&mB>>(t& z;+$^r#SA6bhz!_sy=|B!MuCPnh)i$E(iY_icRl1@_JsL~ViszwLeeGnLkujLx z_O@u4TkC#c%qPr`lDh;ikN~Em%W|=e=lx})Ad#$_rHOPj{Wjt}!pfUziBy&AH3;#s z)*&(~L9%*NdG3_^&&-S1?Yd52Z#qn0anlz%CNv7k*$0h7ZyFrv^#*t$L-iChzKn8t zxSOTJ3W~ohoIy&r3KQb;(pyvv6h#n0lOi(M6ITK`*%7Y_d@a94I?oxsPpW)swY{Po zW7q9w;XWu9Ec(fjkgi_ucy?B*G*0Y}|Is9HCNErjsLtZG?^);^zt`6`5*sF0*fdBU zy!%8IcG8LD(?G?onBFxX956Mz@<~BPe_tthn8U2(c5&SCAoHa*(N}n%Bc67oTU{JeB z!iUs`U+#dq4Led7b|Tia8xoy8znEkUgoGwu3@J_a_PWxA-VV&e`K-7o^;@Jb>lufo zE6?-LU(OqKYOo3*Bv8S7)YrGWS$v*vD=aG)@6J{_6{TYMYb72rbEC=wS-rNU2ockX zWRf3M0EgK}h4CgBX>s5aF9m&CG3|Oh?lBFnRIHID64RbIKz+$!=Rh?fb>vz^kq5GU z?4p43Q)}3F;Wn~MRuTwNpra>BiE79e*B!(`s6; z47e12F0L6T36zG4_@-Wl0!P5TQ;S&6Q=^@d9S&rRYBpR-|&n~dlIHApZ@vx zt9fGHZX^uP35^N&|4MSR!nKD-?*z;;LzbA;xe3$5Q;@)Gga1IpTOI%rA)@hlS_g&5 z8xbJL*6HX30T(;(sdSpw0(Xy%xw+rio@(n5*huxsuvAd=`-L~HKilgOr?X$#L9i1k z2}G1UXXly5wYC?o7nGpBzD5p|)3-Dq+HG-aTauL%8d?*w_bj2q!5LOzjqqcKKIJ2& zb)isBiI>Y6V!<_y%_52^Aoq=(F)b<k-oRaIO4HvdeG0UN}sLLXAEgMu)Ri`|^O(KT&G&lqoVxRzSb{C#ZP_<->W(44AM zY}9yp;oJI`UW@C{xD&op zb}%H&LjQrALHWhyV)ZjwX_|}!5IN^g7&Z1Dg`XK_Rpz_t-Bt6;%n31Fh-(vq5~lC; z@chXSn?=jOhl?0nB4xnd$q#Ahk79%Ens?V~fNK{uDL-Xg&;1EF0mU_hX@c_tB33je zK8xoVE3j3Nx0`)3q&X$-`$A+e<_DNylekkZr-h5@N0#!;*$TJ^4+0PK?8f5#S<@vC zPpegHHVED}(@hmi*Xrz5NDkP0OdB$g_9ZWSAWonw zMKcp{N6%M(OJr#A9W1rEh;~9CneEy5o`bxq(!B6ugxH&|`qURA%c{xwY)r%`&8vNR#?k?AC0rwlr&moLwOV3WG$(vZ~?t%jMg#t=o zlZH71=B;v$I>sxi*1&qJs>qM;;ijcWm;72aDU8 zRDPWx8{h^E9&k!tU=e~}Aq_l146urt7&n)U^mGPupxA~mDw~v>U?2wbKUN2dtws+t z0hpi>P6V$UZjN}7$7Vq(+6X zZDv;T2QV^T#|ydt-6Q>A{oB^rscl6E@bydLeAlhMpQ}30F}P#rvZ(p@CAk#3XugN-I&RH>#*%Wwf2rU^n3uZfw{kFs+Nu!EmFhUO51^V{`OOFayK zGciWnUel!7A1HIpN zO=I`^)0Ic)9(@ii1(Uk(=8ar>HQWb@?eR>0RS2<0IPfC{ph(c}+fu9|R*ei$K>~Y0{Y$ML!Rlp|gtr~1zT9SHJ_eM}57)%< zz`Z=U2R8Ytsggwmw40-5hUHFbnD8^*;jr8lx@T_A4E8kN~(sD>Sjy4D6XJ;7mkfyIJ%GUE^m8;OKeiPfmQQ2REe1kRp#>4bvG2 zUX67hslqdvT?1Ug@;irKF|D%dtj2roRL7uB;1ph9@m+}*Jv__1<>zXw%NT;-YN{4R zjwoW0ELiBMIHXcE_bU5sp`83_bCx(ddh0C7vY@bC+cUlGXO=kMo0nDX@`4cIlc2r* zCX{@$GTaL)lKe6_gc5$eWu#WU+he*d`8)n`Q0V1`|4V8x5}Y}{ESk_Gotzct0i343 zbU&{@`oNwe#~i^2X736!s~n*vk7lHzVTm`v9Q?M%vFB~z{MhiKhw>C|SO?f-C$-qWn(gFYtlI`V9s()N zv&ixv{_|Kze-8E%0??VurW?suFoMu?VxiKfww60a6Ya-X&&BAn zTT~}r!N#?U4_{>BDj!*OqX7qY+SoY_lOy~t_tUhdLiYMbtFL|!w!b)@7Z*R%s)LlC zjCJVUKs*4|I}xWfDb^{&H~`&(0t)MI1P4*+^SR_JbtA8Uf{4^U(@1%c`bx*YT+8L) 
zVEBCmjiE3If%RJ_M=zO>6GFFh-^dF6-;)y~4!ya-!R{kinXj>x@<%(eQk9YMMP%O= zz^j^#K(?d2;pU)d$Tj=!e`qjTe7Z{HZUDQF&6D_bBc=5iU3{j6f@H^K$ZvH(<8F8q zKLk2ZMZaD71@v z(r}%E(aPoJ*1m>359oUNrI_{`$ zTDdu1sM4~YBaX)SidOWmu9+lTEND+UR$F^AChPdu1|yDwcXU+2jAjvyRh7~$X~ikO z*jOh3+9;di)@qQ}t%FwHtD1!X5bR*7TgQx7rgum&Go**h8y;4l>VY87kTH*I8d_y# zta@rSkAMXD5@=*}g_(6}NMGI0zJ9=-uteKWN*t1f`$mgyx>!ST(SJaQv{N!#x{6l% z+`S?C)E#LdcnA2Y)r}+ri2zt@?)v1^(<`T}eYxef#eIh6w@s^0y-zoH=i16T6;tQi zn&r_@5esP}%Ms z=s7<8ixnFK#jqtXt2F~))(nHO>Nsz)rn$jQ!LvlhtByhMMA5E~vYrAvHUHt*2$c;Y8(50f{07H(@g| z)q?cH&HT2%gn_+vbH)E9lF(D+c5Mp5MGGX7sZ^?>@oia&CGm9={fuH&LfKqUk!tw< zedk{agFk)xT%37V>au+3O{8ys<*f7-O>?xKC?D@j>KQ&R(8xF;kfQ9isY_2Rn`vG7 zA*VN=1A*}Tj6}2g8m_cDBH8$WLr~SMH$3$(^-%@mPc1MR#pZeqqARCe9 z7;i}|Pi?8!G#~&({xli7RdcIK53P$@(p=p&CL>jV(gK{`cw0JEb%iNFJ*-Fm%e(sj zZgK={@0@XwhbE+jI5m}5W}PQBg{Bp#32J@%=GeWyGg*&^q=K})A$RUVy3({??d6HO z-AuxajmmKgv3?V-t{VzQeH>Xru@BF^pm%YA-Tl4i$dBv=d;W805)ua5Pip?ByxYk- z^hTsA_H5c|&uauC8YubFCff&3W|gwET4G4|72j5e+%#~3COGh2O2#^}rnh*(QQ#0j z^meQ~iL{&DR~E5_HeX5}lus&^w(14z)_0b^R}SsjVfs#UvH7eHSQgV&sV3;P*UgV- zXMef-1m|js^P`Q|n<(eA^Q;S^oyxeab^?W#FBtQu7|>^e(?K1pkz@DzBgc|S2Qb}u zMz$-Neq|Y;!K~=*s~dP8su2R{uNT_x0zi~V zfQ(iIO7#{NU=Qf@4ll<=At!;5wT3x8*1kp`070|o9JX_EyZ-tU$h0_0FJiHcf}Vw` zU}9Ag+S^t&Ui(x1(m7cb!7CmFofd@(NA;@16A@54;0(jheFP@5sa)F}uC8D`breur zt!5U|S3Z3gn)OQ{cM24d4Ui~Hk2xMU@Z!zFG3{+VQEvM`Tc$&uwil753>!V+TWSa6 zzg0h9{%^#M>4A-l@VHroznI$LFRm{bD2mhH$j|*cMP}L~+c6kYckp$d2}y3Nfy=Is zNRGazAn65FEw}de)ZyYusnn5$)V9p%`zP+cB0&ofM63n0R7Xld6g-dQo|6A{q(FbB zdi0P3@T1M1=b5;mU|(U%GTnS9@lOz9^LC{(ts_uL*~tDe_@0#QtqDV|yf;t|*?{N; zxH{rwLJ}f74c^2NwsTb`^mt1`#{qu&hkFQpn#-y^0HwHuvG{@E(J1qHW&HX#PV>zy z^ybBA!g?mbt~N z-U8co>5J@so3N0j8Ho z=7sZ#HZOkYD|k#($OON%oCI7|u}Dqu<~Q@rZzsYOU*qOGC{)*4LZq&^1s^H#rzzaQ z--uAWwZ0coG9xMAX4PkXf*d1zT#Y1Ev}o(}=HL$NIrj{@WoD~i8+F!@QyAGAw=VcH z`9XU(d%U+@@juO_2q(iQb4aTMbGbl%Bz+I_;aL1~PgAs1@Vfn$kdRPMnfp}WvyYj3 zeJ#oE^AK}{Am${)H8?}3Py>6#?kyq?G7PKNaP7LRY0ijf&s1c+hIoi#AG|EaGfy~f z5B%vb8?~GIHCPwTfX;$IW$N>@+As?z?a(%+KNJ|*j#36Hy*qB?4}Kbsj_})X1*B@F zR@|8Qi%qK;{8DZ4r|Byzp1qF#1w0EjfHBNhElsJWxlnb_>F*Q^Lw{)|M3aH+@VfGqRUHcy!ULWYm1T$kAN>)^8Kj!{et?w`p`_e zuNPS~o4Z}aSmCJ*xSAxm+GGbt`!3lV@m-;;hA~rU{hUw_yOL?1>1|ZW>x(DAHwFy6bz=Cypkv^4i|A7W1)eT;N>)QW7royP8Ue9OV=^FFkAPt{nq@X#6> zBi;8y2odN&YaN@Qv-;C8uQvi@A3oT+S(!p$G0H?__BTHZfyNOS)r+j@#I3PRUIh;y z`eFiRa5<)JI-D(5SJWaN-;KBL^k0kzP_vDrab)NJ{X!F>CHbvTgYi<@G)N-=JmFgA zNxN6lDuB99cF*Yn)`O{K`&$wDa-EKKBA275dnc${=!)QL0 zUXMB<>tc9w96I}9U0Z)43Rgv8w?WDg?d~CK&H(c_?39$@ zR`#`Dy|VWBmQwOC`UzVf=sfdC&+C9gOt$+j46X6^D_IWZXq!p|M5YR$D>oM!UMR1} z41og#9->>~#zW8{^w(1Wq|87Zuz)@>Yui6A!uhq{&aU)STYC(d8GQukHaC{kSuB6m z3jImVT=_ySR$PWe0Kxh~x+zXuh}+B0hsv%9afUB^pL!p?a>m>4=;31+H|jv8?yR># zxofmk4{lASEynujJzA6^kea#qpo)Oniy5S&$mYV!PPM^th8$LEg5Ks6fj%H2kayN@ z5cqwK*tSDLxATF#k}1klFMgKA2vX!^FKL{bis!{1hi~Z)Eh1hKe?c65N*Wt~lcGgG z=2(1LUU^+%?yQ>MPxpQ2_pARGPoQ`I0Sjer0dE2AgTO;zK|NMUb1h&D#9XWyJK-!r zC2;&bnttdc%X^-sTPe^a(Z#PAcOsDQy6%*PpP0= zk!xFcHBzn7?RKg)*+cw*Enoiaw5=)$fWJ}vm2X0GsH}|DDy(RoXf=0eY zU1*xkcfI!0eoj+Yw$D}uNZ(NA+S+#IC&5AsE{sWQ6^CnU+;2F%sGlwsi|i<&`Osv=!Fm-8Ik5ava6XY zN(z<1(%0aP*6_wz4=C;328U(T+8*Y=Jc!=CL;ufjF}}FyVwZuz)4(Z{gR~oS`cqF+EB&N9QE7 z9!(~UfL{2=E~KuW)xTv>Ro(P3|5ZqMbvmQIUtrnB%)(FDRJ0@BjT=!V#+`LMlhAw0 zpzEcw3}=wIBB=0MgVe#2)xI_T__5K#FHUgXuJ^#F99k)xSbd85@iid%QlUqEl->5Q z-)2~v<@a9;3j|Hf2Uae4Gl*#5%^tkCIct&v%dIxsk&MNhRrIJN*$7y(I-2$IjJTGCxrrRi)jMYtpFKbBN&W}{T9FfU`Hpawcj!=WwVJjiVIGpfuB zm$i$feqV+t7<$I4Lh8mY>2=L22;aZfAP+$TUKudNn7ejILQC=5PKCtolLk0UJ(SNI zsS%w3;V1iX>BulrdANKnvKz9(n){#vhc^2>((SSGzIUmc)oR<7AijbEZHLv1L+OAxD!Ftn!4u^KSJiO2`PG~h*8Ol#fT?~& z!jxo*2UN>-p$PA4q 
z^)k@9B+dq2iYa?F{ftR!6&Z~he{yf9OJIwLq!P2-LEQG;>l5{~&t8M#+p8F+6Hb1E;;!NV9AjgXB zI}9^VDtyQ6cuV*Yo;6NAi_PyByxHbiq7^~m^Trlaz);i})U1DVS9ez}7lP|_a?(k1 z6MLjGA<15XNSe|y=qOx#Y+qPV8Bcuv&UyL5fFcnsy_dJdx80YrKn=MCbg&v>)s25( zdkfAZ@texi(Bs_@a%PdlU1qf@>CiTkv`9Z!nQSGh=BL31SIv=u!JWvc+Czl(Bi=v^ z|9zP;yY7QXz*0RA^>Q$?lGY2Q0?c6*Dx z-QF$|UI{~j4>#eqG-1#S8Qev>;lgJ(trKY}U~-;-B2-`I-&5G@bEExqHx{%i`b}yO zZ7%>FW#l%9(}>9{MO2LiDRT*UTF2&`_ab9H4qBxRMUMU4Qq8M7IH18!8ea7m#1fzI zNo4e-LC{@q1sLr#!-?zV@8yV87$tL`N=`=7?EaCGV9_dBM}dbecD*C~Rk2G`DyeFI zE0xQ)3-%*?;L9LnxB$u`WVAK8TZ0!tN}v53x*VS!=J2H!HwXlgV^k`(LGm|JH6HIR zG8vca40x`mYO@n5d~WqbnVE>6Fy3>O1#S+V&+jQ>Q1bg>utY{&hgbl9`~^%t8f&e7 z-2n(P01(b^9>@K?P}5DIFQsV3sLkZ(#&s6_{9Hr0E}FR%U)k+DHNVSz;C!y+I{oEI zSYI(Ra47EtaM#*EAzc$B)FQL`T*9Y(eh=bL)aEo3@S9zfBMaZ4EaMC}ZEcE{UhCse zSVEj(!>-c0_GONua>2JD2C5pF|22^I3qAF$IWR#i2pni~|IgGEkXS+G?T0Tn4}0pf z+1m@Jz7ejlIa|Au6m%>sgkPoVR=$-ktAha+yG5R`EF4&@r{r*No%#tmvPB8#G63VC zGJ_eSdWxr>_dZzJLAch$obl+cflc!QHWkEsigj?P;7DP=@3k zmBT#%XN9Z5fD~7)C;Xs(A2t5-ZDQ}Mt2@Dj^j|RHy2MDQ`q5LHqAyEHdDDj){32)K zTGafG$L(f0NlZEv6H5NKgd8TZ9)BO<)V<9O4`p%x442>H`Q4>{vT2s{MhkUa=9}N* zyH?#8>GW+r#5=8J?Ee&dnLSU5=2ufu3`~W4nfQ?S=Er z?k*;f(#f$kdFAp@rRZ*+i~t>)6jdG%EqUVWT_fdwO1P1q_Jne~YOjs#jT;%NRjyi# znev;RK(oZUTPdx^qCE_PCcW#hgqNKpUPG{>*sHtt9&mt0tq8PFpuL$mVd(}cJg_H@ zK}2WV#e9rh3DkN+eO1ZgWMgSxl)4{&A;1qmYrU;0n48-1q&8e1ElQ7RId@J;X4o>H zGnwF|2X5%b$j-|pVR;L9ZINL2wt`u?hJg1EpZwaYnM7!|NasS+@HNPByxq&&8=HPx zVH+p6h&@`F<4;;_yGB~*pKo2YBhjpzZ9RxyeutwM!ygk~&&wm}P*!Nqk--w^aY;j? z3Z%Dck0kW^FjdSK88luhubb4J6WuNk_&9FXIh2Ew+I~P1^|89pK=|8Fd$cQWpogFehAwSZFAE99= z%>%kM`dB6r=#Ga-zpZ}aPVwviZR3tw(6ytjyrBA>NWMOH4{mw}rewlzmJTg}ILe`Z ztV~r8L@Pkt2>M=O)c3wrd`=FYyL+@etIc970U7Ues?eNJ%E&=It+sne5D~1>mWrw1oho(e59&MEkuy63veR6#4NSRt@7wxoK!iaA)L; z7DVCU1Z2Jgc=SRQ@?ku`e;=)DJqQ-k2@gNl3ypHS^CbXDp;`7$SA6(rs^3FJC#RJn z%TaUbb!G!N>O9Ad?5*U}qRdhyT0MQpD6p=S#S!H1=5o}-6rH_N6HJ#6B@-@4!5KWP z012X)erWd~M5ktBMgb-dw*z&HkH*-L|}F~G`%)!~R0(_pVQL_s z#O5ojZ1h|Nw5Atrzag4%ybk=xFPU(h;4?Tznc8IygHbyKYZ-`CAYF!$&!>}<)CkvZ zr2R%1^oq#{%Hpn(_li7Q_<|O4cKI~bjL*GduZmayN-Sfb1k5-{=E0#NHojtLNFZ}o ze!AZm&ebWo(|K;Ku4R0?$!l}T7D!k2;}o_9=H;Q(s-l&R>$?}!eoo+s6t+=V9fI1*5Zf{Rv7I8Y&$~rv zR(~R|O41uZDZ&TcTQhKLgyNbnz-jy}S^CDn4|B{(u&r#|o{=)-2u^bJ zJDWFnDxNIl#npa=cJPoS1j<{;2Qqf%g3I9xCcMChaW&=3Co-YUR@FRvzh_Y~pfX<{ zLL=r6bLt2v&1`#@$W%P*Y9yGsBqeZ^+Z`E&QugjkU0A+?=7$fq;EeqHkc*mP+%o+> z+{uLY{Ing#&);>AY(I8mumj#|Jqcf$+4xic%Fur4*dRK-WQP=&pM@QU**`%J8sqh6ceB!4RZRX+$Z){o)I6xnw{2KTeiL?)31Ox$uz`0iZzy>y& z0`N-fA(Budc@7LDn!()flFBe5)TlQg(%p zmF!=ic_uXNjrP=@{K?4@ui!YjF!wL^zbl7sTk!9NCfNJ-9P;T2R%zO=Rx8!iCY#c9MEsw~6r_AK{%|}$w+pQn zC0PO+==U8ZtMru@5y>GBO3U!ND<-{f$R>z>D`8EzY(U!K^-SxW5a8fMz2alWFc;M! z{1n8CZ*|@Dg@AF`ocW|)EDDvm;!fK?f$`Yh*V5}IaZ25V(>>#5HGEj)vp195cME|~ zB$=kJOsEWa*rY*BIH@g|8+Dtf-&y7aw87k(Oie(Cy9~<9DQ726{|8s6DZuP}Q8m5| z(iVz9+cx1Qj4A;*dk_?847l^a$Js9m>&r&4Dja;wgsb@2?qg#i^de{g;Wo6s!8;Ia z_$sauCD%m3#4LDAK*b5ZvB6g1yj_I{@CBbC+J*Wd2I6DTs$8>Ea0{B1ak#m7zrYT@ z(uBI;sIf_*hJ--7Rk=zT< zc@7#RtzCOj$7D`=(Tdd*rc74SHaq_Gd;u=QuTjYa_oF-QdEgUK7y7uilsf%K-4$?bf9UUyNYfH(RwAOzG?(PV-(KqvjAnbwc`zE$*eo6_Ut zoxguWB(HNrFAkjHY-&^JVcrEG<53rmf_R1u-Qw51Z1|qmXMfZIDAeyC6pS<&FmeD< zZ9m~(lac-0tD|6+k$(*#$6>uOpEr6Fn)9grP`~FZ{b{cgcHZ`sNZ!lyr}J00^TU%6 zQjNa){@DT0VX#)_WCE)S{0IbU9eo})3&F^G&V5kxGv~IY$*3XihX%((r<5FV^Y6x! 
z{m?HPkSB4Q19a*i^synx$-_orE(FQXSGq}7O(hEJ#e3wSDTI1IU-^I9yVAENuWkR@ z_UQGZ!CMDpwg<#&5h{ZakJEe~?5r*)EZm z#!;Xf*mvg>M6>Y@H1ptN>7@|dfhN`-Y0c_jd7;ZeurrC(s#fRMaLlbIuGC$F*-;v% z#F|#@bQl|&mtbuh_Rto7zw5SpYH2}k$BJkWb07{=@I^I=-Y%U08#1DBIU;5}Vz0cH z&|)T6LrFI5x35Nfc!H{`XxSpmeZ4|a+u@tM1pQ*gYgPxBHm|fY)4RWUe!JYp58z8w zE}CoX7?&rgem*JpsXfSHI?KGF@HL>bNyix#2gRDpod|$Ou%=4zRX$y;N)6^K7;4{B z0nnw8$ANEdB1?{{1J}>Qy?)ikIUV}_9$LyW6L_E!f8Z)IRM!9eKxYk20mY9W*0J5B zG$nIZ5x!nwDR;sTwCNHWNO67~2CNa19p`VVI1&b)XvQvJeux9^a5uwD2@sU)GQ06_e z{qO*;RuK;Hz-bk4+GnAEM(Ckit{*STyII9lneJiOYb!RwMPL|~&osm9wh+U_T)gCj^&Z))YtPl)z59I5gX!O!wwOm7zW7DYboO=CEWnSUkdF#%^?w4eCNL6lzQ%$Y<@cs zerwQHx(A4@PI6L>JPYz_?Pa`Z$_jjGZfN^mK+h(@2!2_(>IdP)zG9k%xavS?5}ZF= ziv=tHjLrfs-?9V>Wk9wPCjhhJLn+T z-j!j#ik~Cjqy0ICpMUuiEd}45{a0a|Fp}#;Yi;EG@Aty~de;!?MqPb|XIeWAF&3B4l!Huj3LtPV|6vm-fp-^pcFJDKxQUOPx*C$hxRIRKVJ^;8ykJl?q(;eV|N{u+`HPlM)6b?{ktuv%^6e( z8vD$Gk5Nj_!sMYPVW!GF&^TH7_UtZnKv7+&-lpBHx`SG73CwFWjodAI9?isx2EdC0 zLD<DPiXFl?qpzzOT-L5S{6d9(;E8+_`g|j=d+;bmz<`EKWzw?7utZ z**0e3Kr@{r-g)q@KmzvOsTU%U^1kAX45c*h6BCAQc&S4oKL+m3({>h5{whcFOH)qM z9@ZC*{p4~~EGt@;9@Z1;09SmN@lwW9G`me(TtyFF>mA z9Je`H_9BM+Z_f@74<|mE@!((0eJ8o}DlO+AK!=T+I0Ri~elLN8LTR>Z#wS(>+pz2x z50Bkl=1cRo9~b7Rlu2DV#)-{!f5E1)G1URZaoNFlW;MF?56DA~aUrlA#N%L*aZo?S zrb?^b7E;rI{Mom^>cB=zc|$!d9hG-Y^4smS5 zT4XvGir#z0?o-&Okl(8d#7 zbmU0lM5VcOmERdJeGClclCf0TWWcCHjEq%=HQPK9KDc0w$hi`ynwyA4z*mV5_f zn+Vyl?wNnQN3Pr27Om(<{`-dp|Myc!J5r+}d~QcfZFHc41xT;oEWiJS^;&!rcCR%b z7^o|)T*zeaL_pz>>j+B?PoX@PSHMBa5UU`ogNO)I2ig!c&|%nYSEWw_Y=g%{w90}W zjq+RYnkF)3!y$`vvyr6bi%Ef&y8GsGHEYbBwgD>9XhU#fm2bvQ;oeTmkznAF11#7l z3_AE>18S9HT9JNrgsY+~ zu-9$HZZRaJWrWA+HAsl%ivCMy=&KgDrf2`RN6+0 z8Af;b?V?v+H7yMtT(^<1bf)1?u4sii5g^m5c{^30&W;&R=W6QgL>>f8QDO#Q0^uz1 zRP2n;^0gGUtcf+SD+1g_&GbjJE#(e8NTuYComT1TD|F{_Hr-zsaD>=#n=HK!R?uB9 z^Qj|!enPBK1y==NO`7hdKVHlWIvUjdsF~QqU0j438VbTAhk71Ay!CSQTo_|w+VDY# zZ_^gE=G>FK_F}g%HJx4oI11xw#hv}c!9LosvI&f8s{M#lNpK@Z9*^jO<7t8JyT<+F+;EC%*r4cH99gEONvVMgNWmC3 zL-8?)+!7>m#5**V(#@*16bhQ=TZqnrX*mmqd^!PO6+A;hnM7>uui!G+1irHOPm@c3Wu~oc^1maq9+ompA2yK2(4|6bjiv)f?OPy9t z<2}$m`dGIfK^>oj*R}V;cj>QbGQxM}<7s{!Ql)R`s<&;U{QcDE14F)4X$Jd0C6+Nt z=`SUB!)LkV(j#}nxAXu4+dcNE2V@FLlmpj51rP+e%^A77iqxCT@Z-}#onH3dC!o^{ zfo1TFu?l%n5~07sG0Sd81JodQkAg4w`F5HeIH(!-@?})wNLN)}1^ANDiK(J3n-!yz z?FRe(J3E#_>LutDe$3#%rk_f^HT}&D`tC1|>v9!;c<3tc@-;}JG1lC3*WR#FwsO|9 zSUix>p$b0p`e4Der7+iKC1t@xd&g`Rzg^uw`^{1)?F)j-UjdBF9TUq^y6CLi*(kjf zKhM43mp^-oe(QHQ>C3$YAGo`VegY7@S{m}bO)G&hY3;C>0V&i9LAW=_7}q&8OrPt@ zuy#$3F?jz4ypo*Z&^3c#{r3hqBz)$+y54p`^>zW)l*!&q2~wOvM^k1 zC7$Os{ONHOhh|Xl2`2nCFP#4V!FV&F#Cfd_elEvRmLHBV z4Ct)LZ%4)xa0wrV_Gm1*fXi$U%@%X1;IkpIps{)f*(DWD3Xgs+96(&o@R)CgUc9hZ zP^lF$z?_PrisN2-(29Uhuq5)4a(xAjk=U72@lb-Bn?s45=;F3~okh(3E0NxI4}5xl zV=Ql2nF86U3Z#hJsunJEsmqjo&@5Q!W)!_mJCVF`57?L}vdu}^jY(XwUt}UbqyddX z6PM0%zm;nTzGT?@#*W+$OHv*a$JN7Xwb5$Vr7FiWKez|f@)EqKNFtYdT{)PD9u3)i z@xMQEb!AAQ@wAim<5w^-!?Y|$>nEtkhZ~+FXZEmELaRV`)WV5w+=PVaA2>O*$_AJ?s|&$u_l%&s(OfYCXiDPFq>ii@yqy z-FeO>;3VR56m6T>Aw2v?yJubsHUQDhTtl#zr~NZRdiS-C4?be{@FW)yi+<%}NPk(K53IY#m2s(G&YG7DcC-NVc>D(WGM zf1W-Jl03GB7F(AYU!J`=%Jjf#SKB?mb75af+W1^}_27hSl82rZe$Wex1ZxXBcI*NS z=KbVsE%@Q959|aCc8UJ@P4GkJC2z2^`(z}i!7i_#dJcZ|_%@6b+(Pl6TmECQ|3iSG zs88~&EL|J=fsJi&mUe2Tr@Ss>IxjQ!eL8UTC)c`%y8=3t1j1*a zbg?caN{Y1IJ#F*md*~PXCPwnN4m^SRCe3fsaf3*f)vw;$iV*H?u`-MDn{UhN8Z!Ht zWxqU+&bLHWn&NxZ7P~K>c<%>j;F9WY^+87&!;?X<74S~2x!7;yCi5Fdn5>o~v6u|!F1l^$x9#13T|Jki+4OD~IzF-ehp-XI6WF)~(|+*V<3xfqic zD7|q|S3bLDpOvw5P}jn(OMkindP;Qc%Vz;hN$(^?@Y`N*36LX`B z_#zwB_S;C!XDk;fqM?0ZqVUEV{lzupRo#!Qpm$`)3s1&L+_QLoRSkLub8Fwa@6!cS zGGn{8hBB(YTD8v*-pZeh?Zh*lIZgHu+OwdHo88@=Vt9idB9{Ej*!R~j(8X_#u|u8+ 
[base85-encoded binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/tests/assets/car_tree_bug_zero_shot/images/val/Slide5.PNG b/tests/assets/car_tree_bug_zero_shot/images/val/Slide5.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..fb979bc12208e4c76a92e302d179c4d1d104c08d
GIT binary patch
literal 31177
[base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/tests/assets/car_tree_bug_zero_shot/images/val/Slide6.PNG b/tests/assets/car_tree_bug_zero_shot/images/val/Slide6.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..cc5e63a173f3e372d11e115e567ccd2b47b19037
GIT binary patch
literal 21277
[base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/tests/assets/car_tree_bug_zero_shot/images/val/Slide7.PNG b/tests/assets/car_tree_bug_zero_shot/images/val/Slide7.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..93e61c87f489d494bafe9d2d77897306a21d1be7
GIT binary patch
literal 32317
[base85-encoded PNG data omitted]
zTenZb7`Z2cN*#Y`Ztgj6kZ*ou2Jz;WUFz}x*XF(gB)aPdrZPA#R*r`Hhg&Z1i= zd(Zl38D-jq&&nMRFDm3cvv&FJn3z`wU+=&Q@$DrtJ(>0mP11wd?gcCRzes&t`v64H z<=I<|jjkL&;b;ZLTjWYyWaf0p(k~=;)BX!afF5aSj94LR#s_Qn;)wv*);g227;KAo zjd#r;Npo%CDFx5Nlup_ghMywnNEhgfgs==*x@5q15)9smBS=h)D;cp$JXhZ~rtJMe z!`%md5GZr8^X91W(C0?3JNt)spDZM-1+gUj`%7|&PzK&^RF>stHcem3_<6Ay&z8#q}8Klgvd%QvAW zZOx7)Nut;wmkc5lD8zWM!)F+_Zn-CVCE>jB98~gV-py>XK(gEpJr7`N8C(o2Fm*u`T%O*=l6e2?d{zA&?1R qbotQ#|7bV_o?jnMCseQeSY?$iBi@*mw1`B1I(k^|P|m?KLH`ezix{f_ literal 0 HcmV?d00001 diff --git a/tests/assets/car_tree_bug_zero_shot/images/val/Slide8.PNG b/tests/assets/car_tree_bug_zero_shot/images/val/Slide8.PNG new file mode 100644 index 0000000000000000000000000000000000000000..954d34d32e3ce15086be66af38f56fde818c2e45 GIT binary patch literal 22874 zcmd?R`9GBV{|9_YQ3>r%6gm}^aFR7Kq*4iG&EAG2dv<1~R7y!J*-4RI_I;dGM3!u0 zpRw=8HjFW2?$U zJp|bR|GTbn<2v|5P2?g6{K4U-r+FI5sNXvVKi1o-YpWy3mk=Jtb#&+^=kq3R2*Ov1 z{>M?}^!XNoT%}w%qi*13ImPsiGNfLouoTo~9ZoUjFLb@JdX0^26LYvDDEODnHJiU2 zgY4Wc9djjYIp9iob0%1B+ZWL@#}5l|6zx&FymM!%S}wle%uT&CF|qe_d~?F6`px8r z2J1-qgoZ++>RX(Z|Kv#9r!)OjdqYmauSwmJkaA()%sku#0rnlYW#cv2V9f)B3w|8? z|L8}}cL`Dt{h5O0QylE+tq8{@6B;Rml;d3;dpEM>EGH+T*0H_fmX!ek^PZNhd2Y3c ziwnVLAHx4E{ETJU^(Xbx>9+jl^F+bF;4GntSZzd`lPvER1hLja-k6BDWuoI<#qd89 z^;`d3IFGzhuOqJg7&?c%k&{d&-Dck(gox>zS`vQC{sVjA=eXn%Wyd_%T!A2$#n%6` z=XWtZIPpFQ61R8bwd^=as*pCiz2QyQ^fIUZ{K(&Qtx3}A&yUA&&t}+OG6|xUXx#paotP5(@~J=9$BGjq)kO=l4S!_)}{Qzcnv({Y6e)P*~@ReW#$ zK@Sd{;?gnxGu`fWh3Dt=2*0yCE9}zNZq-vq1T=)<)&rY>tH-HH zU11F7{)v!H;&`5kSk_ozE7S28Tz@9$H8Yg+Vz0_>mhX|=B@yByy1z^3vXV;p57hqW zWjM^$;;|AdJqB^~;N5aFjlbRWJ1a2dOQFC1*x~6RJkO_%;F2=lsIs+S5e;AOtj=g& z@(=*Ke)@7h$8^oE2VWxf1QEZH@EWaW#)RJ=xP;|aAJZbw=iend<>JcbPV_0MD%m?T zqBsz|-i``yqu1;;X-{(ooDL<|#ADH*4|&d*cX(6mwE-OaS_2u;-vlS06S~R-c>44= zem-#!_VSZA4GWWO-(Io&>LnQP7nczMz{_qE`P7r~f;*!@2V0zSKgwm%0k1CVAO)?N z6Q1-PO1n%u4<+4il}3=4>(>=(Z-;nz-B>X$0EXDe(e1MdeD%rU8kGk@4hABQpA29U zy|qcUpG6AJ1&u41&aUm`!U254)p7WJeC_w9gZPF^pGoycz-Qm>)~0xasB&Im8u3|# zgO7JB%zPB;PkQUbUhNo1x85EIkVp1wL&FirS77km)3WR>Z`VO$Eq}o5*tOYzisN+; zyn%CD@kV{7_n5MtW)1mcu+}|+2`sD)Bgu93l0+gHM3i&Avzv@L;df_m#i2}6V}TWg z`U=AeSYjIG)H^Q4H2BZ4OcowtR(~$&j`%%l&iI1W^uM$6F$3L{o;DJzHw?x$6D)j4 zUFja2uQFuzu~vL-0}K5rzP&pPr|rs?dN8z~Bpm6TyNKiUO;uFoB2#dTTIVM68;Lh` z-U&#-n)}a6sF8VY{~FG|aSDet+(^71yL@f5J?BG|!#fX(e*+~K=TuEDGxif3nNWxZ zAK{N}s}R3pL^YG6<@0d$&a{7&inn3ejqRG<7Nv7xT;bbzVG3qTkIL9lPb7FN}M7lR{WJD@(%?{*jxl=0b3k z$tYjzWGlcOVGf2+O+Gdu@8f+fJwk#qOWi#6kxN?%<%y*-SvLWvgU%_oiEJe?PY-VM z;}jrZmn>()rEvEgIV_h{iXaKJEi8u-tvZrP zk}6^UaoZGbC+9hlR{NhxzrdLz2!27L z8ChLitWu^jhvWV=ez*`^ERo6^l>vdXIP3NdOG+I+wbbo4%g+lL>N@mS#TBN<9<1h2 z#fAeDnKjOw1;yBcki*){jD0gnE}mi(REl+8mf#7+=)(X&S0?%U%? 
zZfSLDk7fP462vOfOmZjrFMY?T07yiXe+-Ncf3GiG0b z&0LgQz0wJrUr%ta;h-%?pCOLr*aYi@`yoUMVb#BNHvjHVs6W@AmCsZiVoj243=9mu zVJ^Xb?pR~+W`Oujy%l5Wq%74~TF?64)s$qS6;5UK71+D!$N&eUqRhB(N@}6HxsTyd!lmZTxla>93F@xQlv_66FLU+?t0RWZN;jijr^xEk@ zHKVw=m{!G}V+)sa7aNmlJrrk`&IyIPrgf}KG`Wg@y+@)2SPvn}#@^;3^-$8ZTsZ?N z={Lww5}3X>F4R!~SeCgazwXSjOyY3fM(Ay=L+HkJP?x1y`6+Dc%DMYzRkOL^?Q8>xOYS>RwU5ofeISty29vmcD!T& z$=t2QXYI+Oaw7yGnFFa*p{WMdg>psN9b-2|R<1JMptBks|L#Npf<$wq}-QmJ%a->Gpv3TCCu#Zl}RizdPvi1zG#idzpQw&kKeBA$*-*!AIl zE&LIjO0Uf6jV$z3FR&*%{&>D;HbfK-VUxUxd<$&CkazZOb86R3#GSk!#(^8v5Sg2U zYI{rLDMnQvaFxD3ae;x!+p3N3cF?o*xV zn(C(?%ezb%iN%$N0h<;*@mjdT4ydhz9XY>+5*m+-xDWVD`+LUs_1xcCkr*Jap$AXm zn+*xSO^FrhsU!u$BC)GCorKa4CBnOL>hd>ADB}hhVqQB^UUF4^J*58rB917i(H@Ds z-Q#rs!qtXsW5RXcOxmw1jI+)r%0~ai4qIYt>vzl(h+3nAviJo0)XBMwD5FcYro9#8 zOCNoq2w$Xrsj!W9Z>v&W{1U(oA^wa#_&5ws7cTl@cR0~e5|}~PFQY}KSJcO|E#>|z zISr~SMI$lKaJ#|OTP~O*wh7n-Ia0-VLHHxvrI~6%;hYm@M^LUs$=^K!CqGgstW^qz zxzIe@@Hbdp^5+`R11^U131x}>pkyxuy;{B>%p^*cq z!Mk2>uP_0$6n8+!a@S;I5A4?tq7f;R?1QUekad$6&mguX zkag`9l2+kFii<9WdC$SY;d5JQsh}TNGLRlE?+_uR3cdrk4H(SP_QTc<1`9H-8X5r-P;leY-YVj2ld!&*d_LYJm}$0Ixu45K z!VH;M+xo2LHi}DEzJrj0W4EU2+^d(_l0|3RW|wq~4GeyTpRgsB`_Bphp4OZ@!g-Y` zPjF2ou_&XjXyOBx(5kclc4}&B10?2Q7u|)92CPS8oH~H(;%f-tw>hsjWYHb4;|1=$ z0CEE1V7ff9pkQ2sj%(=?>QHWj95R@+$LI8* zQNbRX3$v>m0X39AfpXQi=9|_jR$FqWl59{Ch(CB(8{sy&gN9lQN_Dp$;vtI)0b`B~ zM>qO9;VsABk7a17=5LEFuzrvJ<>8p^+|Dn1fkv+_c|o6!@HMsnZ%NVn+qYGy7Yi0r zsmlEgH6O4OY!a<_VG~(Y5}moDgu3#R@+;k#072h$@Gw4LPywuA!)CkLw>CJx9Q<$Y zO<{!yA7K8U>(Cf(OydW?J4hPP2BaHQ)K zGxMjJ)2uszQKXtC_Sts-kfsW&PUFj|cDpvpZ}B(*L{r;wLeDKtFKv zKX+%!Zg9Rl$~CQeJ(8}{%HGJGBRu3dXWqT#HdR^;h1eylbo3N@qW&CvcYZ)^y`Kj) zncCbZ8GHak{(n}3`GYWOCjNk zr)p=_IZ?zM=Ty9H@t@x(f1XCTZ9D(7>l>Nv73#JBg~=^Lk9CTFDNh$S|G%qeUD`0i z@?5BXf1D`a{NIgTGP^3&Py81!GSc=Od!usyvxZXjHH2H~zsoge`=3d>LqgcEeJTV26hQxSF&|eAWiWyS6*=J9rvty=P$g6dIa6& zWmTA%?OX4onTXUIy8oCK`*!0!8=S*iD9@VfNv8&N(I`=S;kt=^vdhCYf3xpFI{4Xt zq4shXVV~qo_FlFi#Sh2`t^H48_MQrY*T6eyh@h(=Ro4Hk-6~#ViloAx5S^p?9OgzK z*=jPs>k7dpcUSA6n7B&oHS13TF0EF3>)_fwb+xDe+#^8+=mNXmetM6aZH>4`dViJ< z)v{k3RDb>`ASF1|IT3gTdMLcnd6TtkrsCRvCdg|6;DeltlWf|Cl8@L+tc4HKj9JiZ zY0Cp{n0eMpH0YfY68&8W6JzVisHQxuN@`^^fQBt!3ThZ+^z26U7P3=k^SZ(>SaEkr zL$*lr#;s_+6Y<6EEvfUN`1Z4Csr;;QGLwn10>Wx)h&?Ce(Zr@k&DTM?~j6`V`*|_Y}HXC-&OoG07 z|Hn$=uVv#Gy6nxUyK(~A$CWaqgG>mS7CqPRjbJvmb}lYB9Hrl(#xgI{=t`^)Z18;X zkjvSC5E9I+RUjCZ()}IhmSY*rrL#^JQV(DLi(BQU*wgjaLN4MU7phF{nc~X0&}bcYB!Yy(sQsS8Y0XNTa(QKw1sMTel=WBh0{ zv}17n@O!8lpwI6Dp%W9u;-*k8qYHQ9zaF+m!cy2OmK$4Q5f!tk5$Mh{>2gI?D4WRU zgj*cuDRkZypUjZA3pJ!S^M8ZPQd55?hS$1Ez=W@kElzC@BLJzUv?S~MokX{+wrL&o zCD^=ItgqKtoVw7gTV)H2sg>_i#%qDLVo@m3`$lZOMgd<#rC7)&*omLS9fY;Q!p`=& z?1Q-*v{f`)=SdqV$jSr=<^-e_l5=PS!$E4CqVcF``zc)B< zA6H15yGaSrIPt18&%VHiI!>!*U%Rp1`SiFaWml|S>|_|O3;0uGks)dDxpELaZpy72+MJ3Q)d8<2Iti{?<$rQVfeM1GJW59eYN&hM$zOt_| zn5~nlfcUFf?mCKpV??-a+~!Z4^mr;wCLR4@*a7!ge-AYz8;zu&W{7wxwlZ{y>17_iX|brmUrO~dEI)S?!x6=x^J=Y*AhDp4 zczJne!@;I6jlDOLp?SvE0ep2I?+`P37|ZhmHvuF>Gdl&@3?5C8@9E(Ns+{1ZIJM55 z6=R?Qp|27(sC#0?O)mF!767j}=z$~!Y{F>fo_(xrYS7A&zjC0*g) zix!-$TlAzi-g{>JmwE(ZkHE|u8ru*)Jz~W`%IZH?c)KBVRsHGTl3=H5*9Z6G1MbPq zfW|5IFS%8=kpYSIU32Y{IQtUO*Z<&t>2;vXIs~lzT>L;aAUiMhu2VdJg_H0nmpnc> zlA$kyTSyS`M~V3k2;{hHb9*Y(g9K?sKSv0x$?dp1 zW$Uh}noyt{QA#M~O9U1Z#Ey|c%A>92G)Q}))%Z~>^bGGe3stDg2q`9E4`7ym>aH&6 zvT`9RU9yG^Y1t#uo|ZK6ejGf@!1o{ZLbxy3li##C4U+#2fP~%n#Rud;)ljERpH{T* zbdlTeFPGnF+1#`XDA}N_^9ibN_e3T^75W)3|8?-wymy{NPPJm*86Br-ajNz0Q!Hc! 
zu3)pqo*GIN)Rp)QRnv=9q>J)z<{qENu!gzOwsK*&{-74q9h$v;-06?a!l)O|xN~_;tS2I)5#Z!t1nd;VTo1 zCFQP-!4~5JE#~WU|B$uqM2GQ${0H3kcKgSP4?Y(X}7 z%#2{V=sn6+Bv+U79qMkyFU(B^Ts_Cj1hl*E(X~6;Z70WLc%R+jCbh^hsH-O=j2$CA z$>dekH2FK_Q10VLjv&-;YI%^+O6@}BCx~FQFIMM8jPpFp+9%9k+RLMMyo^3v?$k{k z+M2;>W7sPFIlOT7K~%X)!lS?VW=~x+vZg&yAh}U{iA&#DTaa64zi=vUtY}huqPzIX6|2$dzC(!Hd>UYO*c&(UB$Kzss6JGpDSUFaI@R= zYN2jn&Q^W_bTlb<`MvjpJ;{|cXj2I7n`Ncgl znZtA5vGXVYLB^qCBsm$!5ubKjLJRi zC*I(~8H9ToG#rnb&BJ=eiecCigj;unrIMs+igu^7It}Z16(hO2n?uwP6uNpB`k>(D zp<0pEAD@WsN9QCnbxLOKeJy+}zSlU+_fxhbO$XFD_U3*Huh zz4Y~5=BRLa!BhkNxj&*OW&*=9)`l%nRhTwLZPLf;RASAtXUbX|riN}@Lz)h`0Nb`h z-IhH#qZ}v|yHxbAf_t=&lcR^2?Hfw@3>Z*;(p(VNjJ)K!u!S5qgmU=m_j~MrEU^M> zF27f*3(vL4|7gYvf2X=@6MpA~+Xy$C^sD_iH9uehj&xyke(YW>3fC@M`oNKs(#^{Q zqZ|?LI*^qcM1931CzS_>rIeDx+k?$biB2{{sZP_+2)@Yq{4?Cn&MBxY`1l=Sl;KR` z|H`+wTLg1&^Y2>1g_Z7vVTIc|#83R)TQM9k{38s%fga`cFdW5$wV*8IA;l}_)f)7; z)^U7?UVgZ?InC2e-tP!wS=_39wMjzyjMYI4Y1TRWtLA2y0^~-zxuU}GqUzjydd=x6mm15j)EY>K$BE9g6w4k< z*_V@In@Z{0`$6I0?57hFdP{Rox%YEY{Dik7$uA>NrG=pn^+LGmdCxtXvNAa-dxiqB zvH8o%JKjr2gjNPyu1)L~RP^gG=w>W`L-RcQGtTcE^75M9=4;5|rBw z4i+r-{FM@;PpJ#8kk7d8;y^Hob`!o-`y*p|kIryz0c{=r^^OX}`QmWWbykM#IUPTzSzUfJf6Y{$+WU-dWt9g3NNYhvlh^-)M1A{xlh6&o#@&#? zJJyv|C{K}ImbVxkMS%w`GzkP;x=D^hT`=eepYwIw5`-e<@iOJM@K%qp!@(}NA% zF702+PtOswqLqCr-bnFwEFN7)cCpu6<~U!a;A+=1G_NWUD~23=hj7Jp?_(@ClxN?& z4fd!?JNmD~=MTDwRA*N@PcBRzXuD11@Y=w%vV|I+U?<-g3pp2myY2TN)A#Rt#>jT- z6J`nQJTAxyOyCUsq zEV4z-bR*n-cPQVRnUNWl8-J_d{bmz0LRKQ>`HOgPVrxvUtH!yLxU;J?B)ccvU z(yWYbd1+NgaB|ubr;JdH285noH03v0U9C z>JF*gu>iSg;p$KYST}sjEMCEdxdBY_C?ehJYV(|~MFd#2FZ1pA_i>|NwE)ns5H8V3 z6v`(fTa`MDM&-(9loM`lB)|MuiQwM(=-RZW|5;PZ^^5Qf!R{a&AkG$BIsvsM$eWV- z)6?p&vYD>&2D2c`^^R8!BImu<36!_q&nfm=7QnY`1D`g&JPCj|HjZimrhZ-QgDO|Fi4N)_b~pz%{J`4|DtQN|WhxS<*!Z!y&&40&E*?xxH1%p9ZFkG_8>bOZ94 z345~12#B7S(q>JPBL#EiXhiuJ&bBWL5Wp!kXAaRF5Yu~H zD(Q9sPU8`8+`LT&Z0GCKH`jWXRzK^-qdR@%%kM!Gr`J!mQ0PJN3Z7#CSrz{>XZzNv zi9x=~d~g;(0;Rj4m|PH&s`f6WL5U~6^bVHZ!2e!f2~Aa!1NEyu&zSNxkCB;okj6S4 z?c%bm3Zdfqj+syY+k}ZTkbiqW;`BI!AS)Pg!f)tQn@yL(bqmV%c_GrLZYQIWtr_X% z7^96nkj}itU%q@^sh^ac3>|+uf!ntXBonYyX763UZ^%n5fa5W*v3CNezYj5uezFl} zvR=bcYEn}o&+(1C`Br~1eS@$2pK8>_EY632B?%TJ8Uschj<5WRd)THj+d1}QL^bse zAkpkLPPU1&uR#r1C@Gf_@9Qidt!p^6yz#?s$C@9iH;tFJkVfK!O3obb_OwZwAyo;` z^N)KvB@5rvACM^fefl5%aN?o$ZnOy{tE{neQt4Zzhyb9DJ~s^gYT&&A^n8VjW;X+S^rD-c z9J*gdmTY;i@7?bkPqL?By1u=B zq5x;2=tjQGvy3$?F`RZ4yzhRNE*Z`$I2*Nswn5919SW-tY8~$|&csH!+EYJ-;dFg9 zhh$73`mZRkw9#T@gYIWzSwvOH_&%A;XJNPyX&-@wKQ*KUn8z)3VNNelAAPAs^d%aa z#(aa}B9x#8K}1gr;e5Hldkgvcd@A>^pDfMINj|_EOI*83X=xmyc@Uo_{{Y>xT;~Nk zkQ)8_cu3d|+nZFLI~iBykD(xgCfaUu5B=EE_0F`v_v6J${@3dVtji%x0J!$V${`;^ zA-QDqtN3{ImF08Md=7oN6c0vPIU?;v!0?{rs7KqGzx~dP}&1 z&)ME-vFPx#;`5dMm?t4ZNYe#$v!D&#yD){f2b+q^Pn^^#Q&@80DS_-lNyWURJs4c z=c4pPb3tkp@ag>yr=%Xh?asNyG1qpo(4BmYuPz#7IuK-&sH_aIX*DmOd@enN%7cyM zDDLzzsj>`ot5p!MZ0o-r;>RWIT6!RyS6af);b^miXyyDfZ{HHYD+Y#{Hve^I8I@fvywI_CVG3 zS-u1h4w*6y4nJ_)9=_-iA#z^frU`o-w0xC| z_fo*WIrf2m3HJ572)}K6b0ccLK1sy;UXh zu@xw&Ql68=bRrPPTOpvWF71OJ(}NY8hfS%3yhWPUztHag&t&G*?)5# zehcuxw|PAnX8jYV1wqBsqs7Yi7W!>zq(t?rMm!^1b*@n+eET{73@ic7Yf7)+ji+J6I_E!yvIGZ6*C|mb!KyHB`#s8o)3&IV9 zsVmhFXz}ne2|&TkinNpgk!?|}C`Z#{p_M0A9=R39mao1Jx(0+mg@z|0KIqv%cbnks z9ndGFXlbXGK&y4ujSGJX;`D9a4|=o(@hU-xAc$%C#=pcAt#o=Ala;abiD-DtpZu8C zu}-ZG_FF^533uaWgajVaRdQ|manb#&x@gbk-TN}-(}A`vEP6_~y;$MwE@Vgp0y|M~ zVrjTR!HVz-I)}^n4vXx2$7diWppj*L9INe2 zNDGBfm!Z7a8AFRdu~V=R>zE1cU4R;Oom*la1H?r^FAX9}CuqY1NgRkZ)Y;2Yd`>aH z;hm+uiiNoG4;-Kgw_Fc@6LF!d#aH^yRRdoDblZ$@=R#{^WllACYaglkDlHrX-Z_!_ zVw#C%To&4^vREtBri<3*+R%HocJ+hD7M96Mt#GvI_AGuurhG-Ft|J)co8(dg(wB3 
zrM|~8;&xDYs0D51D=AKyKsky#b|F#doslnA7nE7@IVoSTo>IXgM@tU}ppOZve}iTZ zvFJG=ZZuyAeEqMYT*sO2h?xe9FL&Q7TX0m^p@$o7XL;{WAz9sx-iI+b1NAT$AQc7n zmx-LVo}hG3KwH(XOwI=uuPX7JnREokGK*FS);~z&F$lf3y}k|&;Bg+Ruh;x@TKx77 zNLpw}{sUFx;ZJ5Tzyeo->%`X!b@xCWi@KIz86S$pL{XUa4?8R1%A2&$YqyIS3unJ(n_!B?l4#RpVnX zZr{pKR{=4QuHrR60%`1w4!#H0xLx-RzJQsBXBJlw{0>Nurm2w$Ld#VGsZb4`$=3nO zd~ENlJ8_!1+BI2xGjr;x$T~!E11xXK?C1BO29C67+Vpw z9I(}x+hm>!U@5$c(TJ)9gOhV#7Y~#ae1lg=ZgX(JLn$>A*dT(ug(?64{=<4?1>a()X3sZd+*PB;+3b{*k+jplsGBkb%sGLjZ#U=W*H>oBV_uQ-6;k#JA4fjqKyow+u ziwv#Ix^n-KM+wHzFWOVI-(Y%!l~2mh#PP9Ljg@)STKhn0CjEn^Czdiqn@D_#e&SV~ zxHVNE*u+qUvG~LIp18_u{e0Wv7=4eH=bbH6*{$&pi7Hvjs|tE&5qMU5t8QMpOnxSa zSB_fSut4n%O{*HzoRG5G(R9*1lZ)pXJd&Jj$C0O!-9#o&-nA?EV&?i)j$mJUjh1bw zkx?x1&4b}29>ZSt$stdbX7|z85mzF8ba8(t8S~^8yYL(F+n37Is(;^{Rd@v*NO*Wn zJ&nU!_f}dwmZm6KYw9HI_9;tJv2316U$rugWi(s@@dw}4Mana34-rqz&!q+qSS6h~ z+<J2MVH z84nMlUZ1hAv|)-Gn9oUbs@|iWA5f0nG2iLb>>gWK<~s9Xy(RxKJUoQPN=k-tD44rB z!FnpK#lbyI_JeaDdjw3;b1+PgN^zEB$6kzYOAT7QPDnQ!o!@WV^X_v4Hz+U!A%rq& z@giQ6;)WVwZ)bLdn3i`wcB;F|xXthv$D@v*@uIH;)CB}ea7&IuoFqnvw?Dd=nQj#m zC3FL@8o`rZ4%Xr`t%?u)3|oyU1ShR#J|zVaR^am^bb3_jOyD+ivJ|-MM{MBce zeQ()O$ylau3N>c+%nZ!45p} zS5dWiC&GwtYgoFI>FXmFQ}0&@GL$}=$vgH3l!{LW&ACpIC4`D~+uL#v>K9w;dm8du z?!=_(u~AYy>|rgQf1hfTcjJe*SO(?k#Bo>Z5^mulqdN$gD}qFsZeC@nvCSQDM;jidNJotpx=e>Rt#Yz4D=W>m9Ln3nrJ~E@L zUGh*VKeQ?kMDf#WD=t4~(>}Qb1qFv{gG_KqKxU}~*6}Pn^nusmk_W>$bfe1h9J>Rw zg=AVMl&q`H-Zc8&kUom0M8%PJ3>oq-KQ6u8-safPqXg0abK_Fs(x~^;)riWSs2A#vOj%ciT{0npZ?!pPrY0IavH!Dy zY*KC*hZS~exOkRdju$oy-=?T&_6lPPKCnZOeJ}Bt*GG?EO!j6WjJ1+__WbX9(v`2Z zctGHL)aqdLda4kskv2gIi8cGp4)Er~ebyCb9_8)+3y)DFx4tOC)dyM3x=EZwv(#De zQ0W$0a?gs-^YR=!jYOZGb3Hk(%{l+)x)&wYskKeosq^~&-gEl%C=i_F_>=AclMY^O8jOhQ3#!=kai(c$Fx>qu3rtK@^J|RDzadv_r^uHvL z)B_WS>{T&R_OlDQ5F)ExvooF8Vi8T3ZqHc?{$Hxyeznem%Wrh%6@@kW&OvwD#Ff~r zU0^-(tZ$aa&s@S~`d;%as5MeZhr6rZm)IV|%3{Sn9Y6!lG-WD`Be1(?xhzbBq(ci_ z;j;2JhS%A3OQsKm%K14A)Y-?3x^&C|{K)eHguvp40Rm;-S?h^9BkWX?Y%Q+ECybW4 zwBE!ixNEoMzSD4`oVsTSPKO}(jLn)d@bsF78ba%tsY)3q8u40eTfLE_t_5HI%@J?@ z>Txrg3~KoBksYR5(ZAU_Uj}~mPfAx2F0&A9BL@3sE#+24E?%z{@PYSScPCApB@4e1 z(XOr~+()zOc5&&?rEz|zQ?&7i!tOqa4qXW}3;O+kX7v*-k z56#Fa@r${+-;`(76}D==5Pru0xdq+x<*kPpE!_`J&U&AfUba*$o)zqYj&$apvSkC} z!m#!J6lGphBbB)rc(9c}y(7Ps&#D-WkmHBgWnlA%hv>@MvBXGje(9o#v10nCeJhfq zGX3?C$$J|J4>D$NkXN6L|G>PR_j27`UJ0#4l{{aod$%_MRYdR)4mTzZhng>xmOE3M zf0sl{a7kqd7ag|>qviYk{$6obYx;z#V(@rh0U88;x1O?ZI&HU-G%VKGYfPedNyKan zHa5&k^)o93oLA;GH)SlUP8D0^+x+O)9GJhj9nHm^7k@}`V&}sz*A51k0(5IN~L_`@(nuDs8?qQCR_l7e>m z>r9)LM8b4I+{(8pr4RvJe5Ji++WRlG-|!6%v|14S>7NzW74bu%fE!`^pDFWQy$Z&= zg+-izk72rSSL|v}?erQ^f41GL$4MVeqQh|pR$QeJ?LC^uCfc3UiLX2l&FFO{qU`Gh zr{hFZmg>lC|FK+8fWHeRBlAz$UQ;_#wuKEIR&JgU@9<60?gb71`NEf@y8^Bmw<&L-F7uw|ZOK54>@D>6da?%43|0rF0^we#;?NVtf&8^Qn7do&! 
z5`jHeMwM?q>YY}#x!FoS`>YMOghI~jp-8q}4qJqAVDfxP8{m|+_5DJ% z)l%6f_}a`dVe?%56be;BIYskvXRXStyCS)p7lu99elP;s!M9(*ZZY&nh@xP*m?S*J zx3PGfUf4ezn86s%f0+y@`6g50>{3*A0$6&yPfOFH#IB)Np+859e{Bk&J-ylU-+wHf z;2CM`mOayg^5LjmB~pSpM-A*lj(b7^OOEK~--?g*eO^@1^<=2$ zv2--J3x%4|Zaq!ZB9GnyQz4!5@>+PV4Z(YUQgd&f<9-w6g=kW)fyO3?yroag)03xe zUkUj=p|RgnsN~RsbI8w-VMY%G`NOJ-ADEr1mu?S7(5>TRoh+6N1g+-k*n#@>;^bP1 zaIfyK4+#4-Yh#>b@x9`aPyt2+(<*j)KClX<_*AV`7xFHl)7t|L_*NJn^8g<=vSt+q zi~_3&KDGHXwx>@}Ff$$ug=Ss02g886K=qedEUw$XD2O2|Xh<{UEyDV0(Zv}L&B&l1 zhEO&vEdwtCfXl;xtHgGeWv)E2-WA|jJIF(*GL~b!NG)H6Pr|*S-vme^wSO((Bg_gs zN0kZijm&;kk;7Le;A^XYjqg5*RidEOMeW~1eKKSfUlNV+Q28X^)%W3MDhdS1yaXY2 zH+WWvAVKj^cB@40$=>qjQXMFG#1l<|j}d6g^HOa0X(UT=s=6**UC1Ai;++6c947jI zmFI|peiOXxc=>fLeyN;pM6$CNA}yYp{x{L(D&LL@arV;3cd}&2P#?x*Zn=!?FF-@Y zZcpgX(hc-8>_mF~0m=x#sF1Nr8!dlio2L!+CRVou5|@X(cWp#J04)@n$C(b3ve&MI z(1_r=UIbrXLpKXS&gucHg4NqV2;CDd{aE!;GotO<9{3{s&*jlID^9%pZm41Ww!wCv zTn^|Esl8=?s|xm_oE6`Hq}#e;Lb$yLj$G4cU$gju6yom6XEqNd79@Zzb&fKIKi=qF6j+ScT=OVT2lsXB1`Ka&w{JemLk-uA>b+O>f3NcIpT5b zBeaMBSg>_mmKWRYK}V&-I5ZXfGWOf64nld1AntOB5W0RcdowRBiJZ%REHiJho zy+E)p6XK^Me#ArT_Ee>=sF=u6)L$=D8=P9y(sgan8d zDAe&-`>JkI?!x593DxUnim7Fp<-20{5uo#n;BP25gX}1B_XW$LqDOXq(jawDUb4?< zKW*~%Y1Ge$8t0(o)6-FP^&`i|2r|LW4IYtc?qJ*Q{%%XIG_2Vd9nRfSP|;}X?f%>a zvDxY*vD*ScEjzov=^+~K7s(Lrsid}atxCG;O`~st)lTeSwUc$*1D_gEPx4#J*mu03 z?OoPp&mOFAfzDnA1YoMsa}_1+es39KsEOYGxFF4GL~t;nR{Iw7S)idzll|XqJJ^@Z ziF89_|M@A{4Vc=YTmi=2(KG6y&oek{4U} zaf=Potd-=&ws@;^EO9M4_Hw^w*Z3`j1lyjYlh2J#+|j2bE4v9FtCxyg(N+4=bFYt8 zgZ@FnE~>S(&o-Rd&yK%aA8PScy|lZZNsUh0ku~-AfHsLR`T0d~b&KB|XjR6p{W)Vc zY4i&^h4MWhvER(YHz)t+ZeLyBQlG&eiITT$i-q3^kAX4tOP0Qio0+plDsY3}JepNz zqKR%dO2!;JXQZ#(tg=dd$xaQK{q2IJJZ0w7sHmJ17GaO6DuSWkt7_P)D^7NC(m&xw z#ep2N9O(PPjg_~6>;P>><1$x!~R8nqk1%jqQT1Iug zs=1CjI_7&mkQ{**mcPBLr^XeeQw6bA)>fpyM5L0SRfpg|oV-V?W<@w_j;L$FIZlZW zrtO*l4h=DvKk0W|)k=rG(sws-;TNJq#$SbnAU>4#+oqeasB zcQw&q)$NYacWEm|lN3a;lR5ROU%4zig$7{Cp1$9+k6ura_t4V$Zjx#YS~Q4*r}vbX zqUwzZHt^i=y0cV%nE+pc4Ihlkvo@@BjFtDa_xl#bmcpAIL1(z{%Pw@KMMPn9@=+-b z^N~;z{#I!pJ3rgvD>VViKHM4IEaLN))GF&|-%X7i&`;d3MmT^orUE&IGPV#=ERjS? zlSCN`pZ^Gj(Fss_5P>Ge3MA<%`;R_TF zh`k{%;XqP%+SD`4Rkq+yvpbF+VbH3QDDMHl4M67BU)QNB&B|x9y3Rgr=0bOP`_lo<5P$5~eeWubhGip% zTr4!x#yYZ&?REDX&1IvH=@?2D_GV?l%}RAP%ccvzH9-*n6|3lg`b(6GW-hd}e${3Q zzrb1SR3ez4I2zd7%hH@#@0v6jpVIbk-{VqR`Co3bs^9cL#lmbTf~rO z-py$WAVW-<)3-pKNECX`mpNg4MHiZ$8)BZ~EWB=Jc9|a^m5=o`D5ORdr$Vk371NFWYe(>JcPQ}6*HVgKTqP!4?|{JX*AN#$w{q*> zEED+D%m9c$f&#`9N?2~&F#;(6%iub+Ii-4U?hK@)+U}iGGGtH_z1}n1ov>2fUptKW zOk}LmfXE%eyP+hsBD#nLF3Ut1MMvkax>LOZv5Ff|xE9r)bKf)bDxu2f`Bft&KW&)n zR0i6H;5rrs*bB>{y=DBRpG0;o9nVpve||Y(N*M2d>Iq4HOx0yG%DdGP2ZvG*yz~E& zrUyA@Gqose? 
zYO~u_)p2bWT$4kjujx>Uc5w<|{rd_AauV5npj(%LdI_uC)Njpd)y}D#%Le^6deYCD zFlO&R?u0&iL#pG9%(+Vcpe0I-LnDW*0T3om-kV+JbgHt|eu|=Ab>fM#EwrXF`_#vz z-?D(3fBty^Hsy>v!mczO_s)TY1e}Khd_xukEHCy~4Jg5sx&RBZAJc z7lZ;N*SlDRb&owR@&As{0{#ug{Epv%c9y505~WC+k-RIuf~8-5Gz3%xXvEZX7) zJyTHd?EvrED|MaO;Oy>2rEvB-ZFbOo2OSf#t+rS;&8gF=EwnWlcesTDebXPBmtM22 zkv$BY7kX35i)M}X;~9yYY||~F5B6}L9e3pipQFw^X1^*!rrS?XpStVb{4mET6GBr( z?9{@tgoln5<7=4HO5fASvc*qM4lJV0;-v2CRIJtNlKvEHFD-<|x|@r3W?_ZGQ#r!+1%KSa&6^Eaq^#Mx3x!`WcZk+B5mcjCSQIP9__zA!o|=}<}Od|g2)i1>iF#Wf2z6mcPP^^Z0pKa z7}YMbRIE*#mR6R+jMIi%$)vOxITmSbF^q8>Q*C8C2wjvzPAv_EA<8fg)0%QVuH#g~ zg`5p0LrmG{{apPKJHO6+-#g#?obLO6-tU=h5mv&h=?o6fX9bck%(>{m(jayxt76V6 zIbJ5Ej;z@Yl@H8=VpvrZ9iW!_euxB8B=j4AmAgTJv-Wv=b3vNt>mbdPk*AJ@@Khrh z4f@Y3D>62Im5FY>q1{}nk7rIKDRO}M= z;F&~%BTSovtsxsQm1cV<4+g80a4DT+nQUeU(&f#_H-N!}k7@#y>WnVqQF_SMET*p6 zm#@-7MDhfX9l+yR6z}ZRGN*N2DRaV_4xZTA>V)saTqc)Z^=RJOQfE8O}{kC7Cd^ zEv2bAjtw9Pu5#cht$oGtZ%77;PCMF4at74;kcqT#2G#RAa-1S6K-(>Js5JFAjT?cn z+!@U1hL)d+0gTc@54!X@#&wg+C^QhlMJ`xpn8FqQMD^GHm^o6!p{j2SobD?)FQ3+|N#%Je^qe(Ec#f9%W zR0@=X!gKmj0L$NwLLkUikt<7f)(xYPR>+{$Kz05X+!~d_8uf%U<_>C><6Kbktj#tTtT`C+q*HFB&G!eR`{x_v z(i^dGoLX6~_5=0quHDs<5-1QL03=~$J7QgF*zjRa1r{yhdo-yd_)qq-j)nd!qB(zC z_S%{PY3kz5H^}!}6BSx{ADkG9@fa``IHY~$49qT@DO2OZ6Ik*r)-EX9KowGAf_FY; z0cfzE2_u-SL;ube=L_T`!DD3G3xM%X2Rz9Og=?EH`U_JZ>Z<7SQ!{`^NE+t?x^@Z2 zd;R{2)zq&Doux{K2OY4R_pgPzB*ng|H;g1`_v*a`c){K`>>%^??1QzR0IIttg}a%E z2B1L)476WKbr!K&_fEQQ3KkRMO1S050nnXC0?$f!Lkflb!Y?kT_t_x{JAHo&mO+bb zb=-di7d2c+6kBRbT#e>%sv>J^l!7&65I{U|fle}G!3(gDunZ7?9XN*&v{vEx@!@z+ zWqjX+K!%sH_e-pmLi@^4Cy5Q(dwxJ&(U^U!>(aq$x{^)Y!ae&86kBp$YsD|+#Y!oUYMk2OQF6`yx3nXWgUi@TzW`eYf-Xsae>Hn@O% z+3*nQZL6R&?<6qPnLoC{kqq$f-|FgW@K>^p(>jtiIfGeqBMW55!v#J1aFiw#iHjLg zI|z&{Hj@Gj*zkE$4&oO*Tl+VU6G3dkLWP67h`czXA`i54V3>e3?!~2AIf5GWH_^p~ z5HSrl+Ht8ib`Q2iM3QKf@P+Wlg_-b}tZFY0)b z1r}jJKq$&VS9#)*OZ(}W`lN|}xDEd`FcmkwbCxZBk9=1b_i@IL!f{}jOEUH`!8P~0 z!i>fnMA|lXV(#`uchEA_BDPnR=!Ee_raX!8@jLC9Pu_tQ>R2rNqL8fn(ipb$I_yS}}BX^JU?Vusl*LPMF0Qo~|#&afp>>b-2-|AFy z7NEkAaQ!VTc*v-?&S!ITA)SNk)?0N~1f)JK@E*VCnrR!w?Ce}5U8Rs(ZTgl2N!0hERnLWMh(fOY9e%{af|L=YOIG=M)Jg>~nlod|-ks$RQt z8$q_ge=@f0V1$2NE(rbSQyt)UD>W=p(yQhd*w;jLLapfGh zn1Cebwtd>$I2U*E+k|X+ynFYhFRxV(-np}5=g#d6`*(}m{FS(UkG0;1ONpPiKDnYv zTVExoV`G0hCH=UiZQ0YDR~bC6;FdOxtw`@m3nk3rX?%26)T|=`VEVPK47{Plf` z!@gu>MWs53cQ99WV=LGa1GeC4Pe{*@tOI#6eNE63@UrJ|_s z6?0ZnR@y-c>>uO0b5cg<-Rw4m9SjTRg~r`R+Hn$ABV*gxg-pPEX$G~hwTTm<&-VF} zV~@4E&w0Am&xsu4l5lk(e@$;6UAZD)=Hf(>)nAR|t{g7zWv_4=zO`frTWlc^V7*s+ zG=A6mY9Fn-)m@}#`6rD*N0j|jUn$|S*6dJH3Nc`RrcZ;&_=@Z!A~LA2j@jXL1t#e*Qu z(ePqOT!F;W5bWuT!n~d~iRSP-O%L_X9qX|6&X8<%7=!hiZnLH1?Ls^KyofbO_GQVwGQvQ0%P29!~J_V;|?{g63Ln`zA=quOow>)|e zR*BAfSNLU@WC}St6|vm|p4g%3p_dnRmdtf&l_&`u5ma20aO4!`RLXE6WqycneNf zDCU{;vFo+xmfJ5p4oa#8R1%kj_;^|n^!cZ+HuoKZR}whx?sJXlwf;MBX`vy`8axgb zX%ElxlJqVHKa)^D3C0%=ys+ef(dAVuF90sg5^%o9=Iu6|+rQ{CqRBtg+m=rf;$pkmltTr2Woq87@1sj=#OQ z?`bzQe0!FyU~^kw3+~Iz@+31r&dW>ih)6Nzd5bfdy^_n$^2d-L&aSgSL|}-5Rd^=v z;H-Td;M9OIGWoUyt!}q7TEOts{BxnpY7;H>xKwLJPdD(ti|%W=1Re!woeZ!RthV=* zd^)fwdN`y|Y~-0?c2LMesri&CyC@;8JAU=gk~o!{p}eKdVcGNEU0t`FnrD55`kJ10 z)Z?bKqm#TJMOj`l#|;44?A_@yzY2dD zd3;u3QovDix_J$;>It}>t{yRI7nOVxA^G2)tVqY1lsDwr%bnMd>c9lnC zvJZ^UbziJ(ap)HIKi6k(B-G_#t7jD9qN8}hyKhO~=$59(8ueH!2keA;I5?4kU+|tR zI>%C|(%(4qD>Z#*`b0{E*(>#wJ>2(PqY5|Pa3u-=++xo#sA+jY!X@B06HY4o?a!!B z1sQ<2s~4Oo1y#6Khwy>uskFvS1&KpdxGvfF`;i{4i!=Js`- zaXthUj=g{`Xh$E}?EqjDQW(<;s8E;|`DW;~jQpp98#sxz@W~QrRsuH=%Lqu_wG~;f z+<{gf6@R{8#Fz*op14+9 zGCPE2+^5<>DAllS_v;dc)Mp>!2Y=5@PN9}5BbIMa#Idp5M4Y+wLv%CGtBrhBvE(6x zdu93jM;7nj3-2!3yBGXD%79f)##e}aY|4GE!v;4EOz0$>rjgMJd$^j~qQkY=feXy>70V(#D 
zEZ0qM!GG*~vD^>7OKdgE(&%Rpx{Rna0|9~bkPa>{q&t3&P5Z5JwZFt+aRvX>vV-C< zO@qdJ`JDTqoP-2^7w_vf5xze2!mTcU^0etF>F~Orpl9NrxRC}|`*&9z4peyE7hj^F ztS$92zD&g19!jKcZ&mM!{c^2W=WbM_lCK!I`>a z1DAD;>)uVfav~Hm1ufpK+`HA{Vid{d=Wu#jM%F#ZVV8mQTC~q?ZJtu8jZ!)lAA$dp z{FzW57}79k-R|IZN!f5i3-#<3(j=0K&^jwUP|wnetw!17AidV>CE1I;y_Dt(&twZ_ zL>mvxylU$!>ti=MoV@tsWvYf;jq8Dx`(>BXuiHixX5G`hBO~P-yZePslWvT6jyQ$e zehM1WY@W0Ii!_IW()M09-H@&+m!Xi*s+jm+RcH^4Hq&fY-qzZR&{eHET0Q|zpUJ2c zeqZ*o;Ewj%1-lIqTSG6cwGEy#oSQR@PJ)jfvP5 zQfs@{Sk1R-{pu}0raUfjY2({Qe|>L_1D>M(63l5Q4IRw2gpz^G-7)Ax_R}v7G1wQK zXYOS-F_YxqW3BKQO zz>;*SKUlV88S&mG3ULkH0vJJ)+ABH8V7ovEDMGY7AxB-ze&Tdm<@h(v;KT}mLhN}4 ztYNs_0QXPJu8Oim;+aAq0HcH~q0|iHIvH07*uJ(=;TtDEuIZEx=?;*adWgM>Mzn26 zz*T{Oy!yUI@?{FzVytAzJpS|_P*2_l<;9ggLWJ#P&i4Sw3Qc}*lLP?{L6Obp6UWiNafcXyBg>2sj$ zT#Uf-0*R%}DAXO$u;l=_zKQjif5^qH2av!gkL%6w=JhWbhWCETRDaONO0o8wPY^UM zvLv9y>hpB+2#3xUA3o?p#~G!|C^mRm{zsclAl7;YQz_&KHu=no1kck2LRQx^=t0B~BBJ^$3SoQbJ+A;~T-XycPo+&aIB zMs%BNZ1Lc!%mT~}8gemQ-GLy{@4&nyOFLH0rG}=WMB+(~wp9vkx!z!Z=b7u*Hj+_% zc-C3uS5nUkqeH0Q=XC*BPaxLgNu1~e;CdgJNiu&xUX!_$9gAVU7uf;Um=7kb7)aY_ z3i>TtN2PN_1N{h;zXhPP%ZNKQgIM+Lg$I%}buK)AO8kKmmCiWnWUATh)ktUb{5q9W&ebSv4RY)f3xYJNA(pY`+^d1NquoeZR^$%3QuEa!Cj8zP=`b`r zzc)4QnVjGK7?{o{_YNSW6`s>+Uy@#UH`zi~&c>xEplLB@i0N`#|E%0Rx8^n-18PB| zDy)jmq(-IaOuAC%JzAB3j$7ZyUGbsOP>g`u+{A1}PiRyX*}C`QzjhYobFZ~)VG872 z9rWlv)jBN3c}m$OH@fAx{RAM#dzptb7AL!;l%x&a+fTpFWvf(6R93(z;GS$^fCheIS(YKAV3IGj-3Dg{Y@{t_68 zcKe++Xt!Ts?t__dw*#KdG)!&XPhL-Mw^r}ELwu*0;>o(S#6 z&FdGm9K@xZ|6W1LC^vJ;*8tsOE0{W&8&mk~a(_8hs&$3$Q`T?t`-M_TYIsT!R_6KX z)HqB?FiIZRRS?T0l%?*pLs{wyUvZ7YsQRU%gW&b2S!lvPR)?k|=qSg(1c z1YK?@u|6LNM;k{IcIo5YDsT7NY%L!cd#*8p z1Yy+;3#lFr5Ss!AZVOOLyEGToRI~ekstCVWfs}oHmQmVtNZN4{=)}Fd4X7-X(XOVY zhR$VrG>`!H0$kNV%O0MFNQjKw9-w&#s$FKR&W4XDs%_NpUN@ldD-LH0P2 z9gUebrV%AwQ1Rgyym9<3Vta}U||3EF%0Az^(=d_~2I@;Ol{P&MFR0;*L}fJF_O@svRoW0SgG+ zINzxFiEFU;cj7ZCe9;UP7CHcut%^?!@OrOB#X3|_o8CX7O&tK7Wf!uR7Bl z4V+b*7I>ixjdOBj68s9_iHK*wrdmOVGg*#dZBNE8I37X6>U#fGb@oa{o`3yJ-rV5( zjXf_K15!1NfmOCtH{)E-Zt9ucWBN$Yb>@=XO*UZRy3T3~lUu>zFGuCEYKt0>aUf}4 z_5fP&kh3MT^pa*iuk%Z1X1CG0I$Km z1s&1bBnD?X99Gn9i0Pf7<+~5ZB>x85^R2nZE!bGDGzbUbNC+ZidRtS&yA6E-p>nTR zZkzuVwDNQZ^FbavDf#6X`F5|4^EHRhml{^2|h~H8eW&_ zU)3W?2~kQio2elF627~8Z-UU}hgbS(w#wmd0{!+OD3t+LI$HMU@9% zctC^ZGN4_V)%z%-P`;KRKT^9^aSRu#0E4*Iaj-zvSZ3elqaDimgW?u2uLFi=dA^g^ zYMWVMs~}jcG&T#2fV9``;N>@z7V%AER!Y?C4E3NyfBzZ-azti9I6z7Bspn@z3jE8wCned`+IG;OTkmt>^V)Oa~3QVk**_%r}bS6j8{dX0DvEdrLjE5e3>&kvp zNtOPWcEHaP3V=Z*Gey=717}b8Le2oa;pMB{FsZBjfLyt4xX(^J4My{W(?uWMRh9gr zLNrvf^_xkGq+x!Y0qC6yS9!NP+GGx1HCCEz6)s~b?LwJuwrE(db-Rb&CFQ|9S^vX_ zLHY^^4G&_#maa|PlDQX8e0{CUpAXZoD*B|V;p*fHx$p`TaeE3QT8ost(gvix)-Jp1rXA8$c-Qc zgaz}OckWxQ%=?fCYH{qeY(JR#H~52wCm~{UT8#eyh1jwm}O6Zz{F*5^!y4J#Y{eigu)+sN1KYkgA1Y0YS4n zqHeasc* z)xPzERglg>kG;vUqkg?DskmNAnB?+WltUrsuD*+BFhC_}O&D3TblA|&e%ElO4~Ask z`>^M2GljQsKq%xdT!W~loKf6@9z*db#$l%J=L0$9y)aPOJ&-u9--hn$b??Esw`liE zswe!DaVlk$K#8o47;Q)&@eUGjdKDEg>%*HdO9X}S*S9y5Nb%dWijfsGGn9*OX)trV zZcEy!VulV#dmu1Ya^MB3iP%i{Fi?QwEj##U>AeFJpP;(wEgU*q9Og$IdLREM?`}QA zBoW!ghNs8To-Jpi<^3c|?Pl2{Mb>3mbXG>^E~|&IY%pIRN3;!KNg^@njNxL&46k`J zAVnI`DoAXVAi$*$%wTOch&g7Ro5-kWMXJPe-syl*LDk`GSgL9#>AhIj)y+q(;ZeoX z<;Gigp~L-EsQ9_|doT#+GmXu?XQv^Ftt8=_s4)L`=cJx~5>Y%=O3?$=mII?gLN=fd zqqY3E&o}9?J*zx)c6rK*9C}+wk!!W|n0&R`)ns&-ElmSb@?jmmgM&HZTE^xaff2V~ zQ3CmE7Uov(=TQ^!9s5Hw)O|*r~$5I z0nnbxf*#LCwnrOvx+)b|#s^gRhyvAar=| z=!RB)vZ?>pt#f&BL^Gvr5p8*y%Belrk+3>|`gj=HX}~V95e2K=_k;PoQ?+$O=@u4% zXgazwWRNme=E!!gcObLR3pi}qY}(srq?}A=cwH(e{S@@T1XV{v@43$3qFykA=MF{` zF`!M0zZ!zjWk0OZUfm#Gji$?o{+H09717%Cga^&;4Q-!CxyJJXzI;LX1~oCY=?6F% 
zmDAD4WxOq7ptLlvzU!cUtbSdPBPX^g<_T6zs&kPc2XeLGZS-^rPCtdj+g-JA!!FAd zIJ4@810h$~RU}sxvSeg6w0OnAMLo!bJ(<(jz2q2>Roih(1n-Xz4nM1W`fXRXymWEu*>gAxJ|MXdhTUT z4fk9B#(l;MY#MtCXoEuIGL~y|6o8|;U(@S^m)^%ItIA<<(O;};;bb7bcm3`k3pIer zz;2?ap6IMtU~$L;RMNpxa)twYg-gq_{sX?9V)v)pVKX(^VP|_!;u$?beGPc6x{YZ?wuUkO^`@=n>V9xa zsiiAZW=bkS^^g3r&}KOC1$~DEWlw40<(0Rc$?)-b#=9waYrr;G9rD^=mComt0LC5; z$_~~Jm-&wN_4I|(N2B5K8}I#<*hT%Vnx87mqDtayneXD;S|A?!%WWIN*)t|!MeDdW zV(ediXhu9!LtRg0+@`)wsE6$gb|nGlo?qdK`OmA4Y)Tp(zR1=N&7L-2L@ zHW0h;E|HgR?_{Q|70qsHjt{h@MWN>f`N1)5{Ujb{d7c*@4UdT`tMl6;6kA)W(E~|8 z7!|9W>f;fA7>xB@qq-o`a~#hLNWp>FGpY>qeJM6Gi-NS4>X;(>$$ccy+S1{sz{>nL z;gOa3R!2U0NI{-Z3iI$M+c-PzbH~C}Cypmqe_;wvpV)t=qtUofrmsZr$Q^qnc4NnN zj+|N^Z%?pcBnid6cP>jZV7v8#KtOG?q0Sw7#V+NED{x9Z4ss1-X*sGo#5G=rhD1R{ zqTjDz4w~defkufP=P|b0N`Tkv!W9_??3Q9dQwVwXT84(|b*hcuPTnJ>r9R^|!i_JZ zr{eOI#v8oB@&zE>NSL&~g7Kxb8X&}Moug{oem6MBeEB|9rU_L>K~KORbgqAHG$AER z1?87jp2sKIM5N4G-&GM3Om*RSfJa3xey6U06sEYPm1sg5bR;JQ;hUTDEIlfnQ3(JY zP^L$KmS2ICMBX0PVDtpekbfq72JJq5QK7LwL_v{l1$>$)Y0(V-a-9=~QLVVG^Kf*? zsonIdGghVsbORB$8WL-fqW65uh?OFAzp-pNW>jHv7*s;|NbkCaI&;fK9GcdkD5ZJ9 zl}{C%f1hvWkq1@$^sJZG^da23cMZULRnX5cgjdnS(ByqE!e=|pRhDMh2Fl1R4*lDU zT0g>&N;uW-(q=KOvpkdxl4aqXWF812u&XcK55gng#xxMF&NDBxc2W<$c8x?Nxl10bXIX9JScoJL1IEvKrVB70E>v=owLPqK`oODNN2~75viZ)m% z-pyjL_I2qr4P6tA43+o8uQ6L00KP_%cq4GrU(3O(HnXfO8gT zH_)#=f$ChE!uHAyK{AD`j9uAM1kLa! zNqNL6{B9$UqQ@Y3r4q^u57dMgG4OmmR1B4#GutqWp1D9@0I|Q|Og0O<14I#kl;8Vr zuuDA3mu>hyEw_>Xd~h1U1I`N1T#50z5IUd>Y!BGx?LmD78#KsH;b<4n9HIh2 zGlY(V?yZ{LKx*iae?3dOgV3x(I^g=Y_lpla4nPBe(jF+C7zb&yfIjTgsss%G!|4+z z0-i{*@f?CO3o>$1ddIm^ep=iH-63mlXO2pBxXlDdv^IYYb?q11FmUSzMRc>zIw?&C z4zRXzw@Iy(n4ot>fF?CTTL8|0lItZ$$O@}F%sYp-gM_%)HbVjdPm!zWW@BS!IwwHB zos?DQZ>+!ojBzccsRP1Le%9IBQeC{O-z{NfphPD``zbJPsNkoLJ^jg=$^Amg)!}<& zV~)d~nj2`0Uy3|uz&ZoQfG}|u7;(ZqxSwSr_<%b(e6cPzo@Fk#uF_)xS|aX?H1bXBQa7f(GXwo(Zh}Xmm6VP98S(6w4)R_y+c`vF-D% zmumy{=!>xlFGAxSeWuyaabn}?$vn7ZF;i6T4@08j^od_6M1#WQ7zCHy7W-&n4i1nq zAZW`Bn;IXb!A&u*=fLNNd$u~NZ@};l*8nCmZH+2tlkL#!7t4;3ff0XI4}ObH(g0kz zc9sFlj>_feUOgNgn(`p=fJ$>FP>aK!osV^ud3D!nr~(L2N6u|^^xx=#S6_}O86Aa- zB5#!tfy-bIcrgvV{v|XY^s>gx$k{2;JvdZF6N8;uc!sEd2G%#r^4{MY!Z-a}O~Jzn{s36+ z{|6TuVK@K3cGJ?VCrr`jmrv(t^}E$W+!pgK%-tbTLuWnkPOD1kUC9=seN2hXz_vLA z0v_#RG4F@g_iM-lPBu?Z%7{vQf~#zT)8B}-k#863@L9Cqn68i;?-#!(VX#I`>=|p; zN-~Pno}^Z(Ey$6tThG2WfptAT_reX%$($76Iv|Y=o(016zDwlhO>db9?WVOJ=eb+I zM{hN8b#+*}M4pSFrD9WF#Sgu1G%@N~Fk3psfc)@dJn-Yowmb(`%|rErSFyS>MR4aZ z0bBhsx8%!0@~x)tgAUh7V@U+t!CFj;_hrEtpQTFg+0ZPzQ-l&Nhh1OGorqf;XEjCg-Qrq~*qm`r>4*d&o=%Cf!od{QgF4F32z~g$@2Fsov|^Rz{SF4d z#|^nKr`0BhS{f3t?Rh0|oS5p&rgcvy?*qlu$i-gY3Ct6z$Wnd3$c_=uOu!1zJ?|<> z2K81_zgq<{BJ*$)_q9pF_&tx9gTZwMSQrxDcSewK3qmGF4nM@&t{`YvI+O&^>h+>wk}txV>RsUA4HJh9~LKRzCT3i_`}=<>%@ zZVIMhxo>kq73llwaS4j4?6nGSkiWAH95`rtb__NSB;e|BIw0_??|IDC6j~yz>w;$L{1x8yuR2-`zN*C6iD}z%$Ya0q=SuEFDR0K# z39S7X+wU=^g*cSu6XO}BMYD)^biKlm?dW~p?dMPLM{iN>p2@<-%D1}%);9>|DX6yC1~3!Nx1{F?lP2-A~I#OX&l$anRwqZe#6)8F8KK~M-_5$Xm{1c zlb;xi+y7$CnfqC?c_$dbF*s1S^ z*|s}|Uk#otm?}Ht9&dnkbmeTQl=%1QeZK@97^SZ`C?Q3ekyxp2_fpfZvH$s}*X|&4 z3dhfBSR#$y8*KISRYHR7AHq;Fs~{resl?IsHm_YM{1ex9|_ye9(zmI zOKUT^Y&Z>X%N~~xP?E2i3v&r8Eo^Th$|nB%S^fb1RRajM0ruCl{-HlrP@IaXoT55L;AJvw{kJ*-vHdA{ zQaNZRa%2_tsPC?tV5Z?A)XqeEsi$*xJBGg;M3d}!{)D?5=#ukS?uKsMRf21ek^9j_H5(J5KA-<< z=(q~v0au>o-Yh5K0DzpR8xbp5jxr6@BO_|fzh;`Q;C^47tXX-YQ(G!TNCxIP@|;3p ziv3dxl;UEiOU+(Sg4F+d#rY=$HM0Yi$DmgGKiO$9$W15iS$DEaN&H!Y7PSDBNo1ye z9NS9Gb@fvUd9CO85h0-Rr}Gxr37O?yqG2_uy_FFODjHZ*Ioo;u$wD7pGBK z&Vhr6h5uwkNR)z?`^*dO2d$7o_sW)h-kWkWO1ql!Zgm7!{f)!Rs>YuG?kT6Xy zN{ig{|5gjn0UsuQ)~9eTM;_kHS|g7;I`p&JxPB447(Rvp7-&bHqftc*wL}9v74w{Y 
z`t4`TX7b+2n|#~HKgGZ^y_vNg1+nncB(F7~)0~4s&=C%C1Bg}aqy!@VV@$@ZgON;J zQuc7j?RgYO9!&VxK(*E28aVKIfj({BEYr<43E_m?*i)K=0u6a=;2jKJa&I=1Z5t$8 z!#0WTv73mo;s+1G0G(G9qsf|{!_H2Zk)rAI5*I+L^n#yvIk;HBJ9@)50yc#!<#Ru) z6l<2+w)xoV(g_tLqH9R|?zz+dKFYCyqK_+zK6B9mV5I)JbN~LOR+rmtRQy6=e&O2` zD}ygJVCdQ4^aGC{D0&dUm@8<-i0-(5#1!sR--Y=L^5xf4fn31)LyX0rYco*uUf6I_ zQ&3P2hM=ExHHFKC5JeaC=yD0E%q4wji*tFfTKD~QMr9_n*wzGSt}Kv+Av#9>GXIZ` z5LfxpPcT3<_3&-DlRB$VB0ZwFi}wOQx=X0TSZzV<3n4-XkNJKp;*_Tis_&TQ?sn)tO83$gu6pIOEY`drl0u>OVLKvO%uz#^c_it7s2E_f^cO&r@f|aG-s|TDhm5s<(_NPn&QB zmfUH86(qcvP8XTpt&KfgC>f-Ir#t6owr(y0G$rqXysj@aw657*zz5C;& zl5=I$Kn!WCmywT=1@XBn{iVXl?*YHs!A->2Oq0Vx7K6s*XMy%=!%rXDdw%s={_G%O z<`RkWv7aSCQWLPjWkwN0bt)RFMWvGRmJ;t=(0dUN?a`A28MuTf!dc39A@x%x-@O9K ze^coKenx>|=45mZ1i9d!roTTm*h46zWD+U{U{E~+zrV@-jZLW#OFkwvm;+voLo-I~BMIe*0VGy)t z>Xyn}>xW)2^2pO)EIHdlVmegh3w!HE!4IBkDLOoAYV&e(rP7mChq*c6XFg8CH=b@~ z-Ns!Xq7^k5K{*|xlqC-PA+X(FL6E?2%sgJ6J0XTL_YB_KYd?x#1UmcM*Np`}9Ljlc z*IR~fpiInLA?1v3ZS8i?ZPGU^`+vApvtLCCF-+Eh7JDB!+u>bIqu#u+21{fs>5rWK z^XY`!<2<=C4KLC(Qalz~7h5J@g&MTOv;isJ5WL!~AxiLIFXtuxrPv37+dZ9AYmsYq z8Nut2zi*-|WN70@+PiK8qs6J0LX|M(@xN}X_tL^Ocj1NyR$cgGx>+}zGr~gSSR?%H z%t@jdpErugX>=mZh+dhsBF4HH%pXr5|I6j}BISq6iXm;h=h(T%vm=pNytfZfXBm+( z^b5zkx2%06bk^Vd7_r#3hww4NWqKQ7*-wFd3*Vew0;#61;5|p8Mv-l3@(?hI3i_R< zkzhHAC5>^)x6dw0IkmjcWoXPn7L6@$CoQ>zgEgd1urvPc zora0=hDd-fKWv}e`N|-~KF(|XE&G{_L6}hwg)`}Q6I|Gr*A1@y(iN<=Q0`38>5Opk zlMIqo^wfOUXu>;3?uAj3qf&F$WNhhae5Xo{_7%l4xKwl*Q4;f7TOcXOjIrc2oYj+< zVzI3aXJWYo6PHjLO*s5#j-DoUO1nZKOpp+N7qgL|XyFp6vf=juYg{SLUBBK+i|>~+ z^<2thL0*fZqA#2b*-!5v{Dxjuc zewq@om>yB^m;BqS*!h6?Xcwpd+e%6|;&=rE8sjbS<9w$GJq!2QS2K-uWwo1ZLruND zfK42aWwe{p6?B_onCPk4Y7d#XX9ZHb)_y_ha{RloX3CA0MK2p(%pfra@vulU8(B*- zNY&gWGHAnh4swKdW~^r*?4B+;wEjsZZS+Ly9C3Z>X7^IhV$aW|U>Oy~pD;b#fIuaS#>O6m`ZLEeM-V+~OG{Ge%U)CG2W=5dK2A$a;Y_x=x;{$yW_h{%@t@J5 zCC59Vwr?9ZVW5z32Ckq~NI}}6U`3^Y(7G2=cElr_#EjWsxgVB~mRruV0u-BCNGu6h zojZOFwoU9Ir}|yTJ-C0amz)UW&h@H&p*J+iu6?S7xoLxg5qW5?oD$!lKptevTl@l)=O-`@uzSYFq%VW{4R0KO$#$*)g zZ^{rLcLLmcip#9#`{je_dD@+MuBP0(7kmm9&WQavvEj@+|DB16qJpmTuz1NfriNlE z7!zOYf6-ee2tPUL!HxVl&3GVMNiZO0>04$^WLR9!x4x4NDuQZBMR zfCn|N+>=fL)Z`$G^&JRvS*+Q=TyI~o(oo$>l0;r>LFNV#NO2+oFYBK4lB+rS1B4%M zLu9U_YcWtR;zVm`ZzXHS20%o}j3`vb^&=P6_dxvOqFsImGBEZ;V&hS%W$QeO=Kq<& zK$)*{F~z8In>yRyW}wVH>Q4IC1I4n!RnsjBT;v<5L3yL(Iv|=r1!!vESbso#K!mSi1SM%aN>)FZYl!roTMBL zGA2xNK~yCms@2m8-<#Pus%pk4PZ1-Jy7g93aM>1h8APU1JF{1rj1`3|c{=VJwN3Ff z+;uu$!+(m8Cf7ix#aq%t>Aw|ev@py4^hE(CPAc^Tk!E7vXc>3l`0UNc6VD&TW6TVc zmx5W5u|r@Z|JL~xO_|+Q8tk%CW_Dx@H9fCMS~!0$Dy5rF)@-6x`#q*F;uOw}P%wQ8 zKcBt2No$56to~Tz{nx4zv>Pybc7Hd1K|K+xz4-SBQCD&(j#ynmyFjac&c7fIEe}C zwP8)4X>MM6Wff-Wf+2Fn;e5Q8%KilFrpW#(vIyOIR&;7t(@Q)x0z#GPWV}W_g>FTU zhtx?A0!@K1t9wppWAM)&pImLLDZ7l+(n6Ys4cg}T5+&_Kwgl4@Rt|*Jwd!duPt9{5 zM$@n4Dxws|f)UC7ys_F7-x?JyBuh2D^Qmz%q+OjnB!i*O)aTCJ9InCseI}ct0~D|X z04~huH~c*Rs*J#jrR&v+8WQcsyN``?4ec7_4NJP^?C%3%`gx=U(ICWsbiH;nk-MQ3 zotyFU^4yG`*5fHj2O6==1iIF&W)vl^pdh{?w_VW)ti0_4@v~bLjo(ENq_Zi$mhmXb zs8DYc$5ne3>N0A6R?z0QwW%~^t^A66k4~{Xr)q0aOAxC z{kiK4tz{y#+dGEw6_o=2?gJO&+M7pFzAQ_LFEBUEuS)Jn9W@)4i+P=aZXJD@@Y2yR zQ%plqnDR8_;UdP8@YXsN@n5Z)*@PRk_DbRn`^h;B@xjy+gAX}=jbW(kQJO|(FN4T_ z3M$(2Dd322+&rum<3R_PPeerWV! 
z`>Uip))g(W;fA90WUOC}^=2+w-OOtEUpjjY={as#FI#T2tm{vx7F~!=IPu>SWTyOq z3rEF*?-$U!aXYEI-(Lx5)(&f&p+<*Be|jIUAjIL27QfJlc_sgLB7S$(jEIclCfL13 z0#6o|FBliSl#_yz6Khz}G;RpI!FRYCb|YBcB5XVlSgfV_+~c;*N*cAE@OoH9kbEf| ziUKUpaR|XPWau|_m%+OGPbs=g%$AKI zDZh0-D;T$re53SWvPv+nu5VQ_1-rP4;q_@aX-i=5ZohCWPf>HE3^Og-G@u~3g)Vzq zf_;@b3w+>}f6<>$TT|zj<)d~Ei_-8IUN{fh-adWpV4!1SX55pqyJ@QsfPLpaXYd(aog)Q8CGcl3jnpGE#cdmq>1@A z`lw2?dnB9XI!IVTxY~c;1($(f-_-H-0ZOFA%xBuQLjD!_$oL+fAeQR2!2T5*of72# z0lP~3qC87lNU|h<7QI^XdeELShx?niq^=N!McR0Fb5mMTdpzv+L9b&=)Pi^nFrCf3 zhP*JVBIC$J?q{knUUX%^Qe9NbFh2sN6jNB(Ex)bP_xWHO&jKhBzLN2IFYYKJU-f-7y?u79vC2GT<+iIVU6``4*`xjpf*Uf#*-6XLZp!GbhmNQS`;xnrV?LC6r$z)6o? z$d5~m2Nv1`S-BH$p|+0Z(=zFj%W*_56->^^al&AP+5OgsiwTPrF0>1_qRGpt@W~ac z7+0f;4CNieQgIl$jl*01eDWPPgKyVCt^)EbOp9u<4HQimOuUQaF8fx;w=UIh`%E6x zKOvTMD8E1C;%-Rcvo!DR#hqhcCHu~vlNy|*CO{cu1p@dL<*7^Unn%rY#G0f{N4`C4 zJw{6k6vI}}dObAXP;H8_+~uIlJL}2S1@tR$b2e;dlO1_hz#*+wx!!DP;e-U$c40}6 zhg_yGPx;c&lPSg2KLL(+Ck-9ezU=Np`Ca2BPlPopr+sSkzO0m9GPc*%C683b!)Z+B zu_jeff?otiCR}x094|}AP8xmpUR{xxpN(0;z-9CHo6-X#mZQK^pZib13B~aMeh2Ef zG0lA6PQBTxM_XE}8()ta>XKhLD2kVwZqiJkzc3)ffnE|-F~X>+UnMrisL8?L&6aTr zDSjhwQrbKnEl~@7mJ9oYD-1Co|0SrQf+1iID`UhnbA z2^zeTXE#ui>@qyypD$&Z) zWLmEzgID}OE0wK{M8|kPuP^9iU9map)<0)^Up2JxQrE z85LhB^eaJv!Ld+~jht2g!|AVZh7yJZ`i76EFyJ<((_6QEI*lqXrlJ|Kymd;!bdi{d z0QL31qf#{1ClqV17d)Q;YN;!qFWr}dT(pNGYpz_3C1KsV^fZOc!o-^87AcF(p}Z&b ztcM2lEG@^`HA;6^MdXu%bW^ekf~d{$;n&ty=SC_;&0*iZ)(A}XR(ep#9LiupM~_VL?DmEltHVfwS# zcn|O%oE8%dRAc$>JXb=v8X9sYoP&W!j3Zit>tS;TlGTv|H$D3nG zwO6{x0|W)G!zB-w>ook&duJd>?IlF%cE~}olX(C{8&o`rTgRm_FO!QfRFT(@-5Fm(i0kig z;FR`9(*RE+;sf~xC%-ZvJW>q1C=i#jG7l2(`1iUCGZ}f`a9jqCUr|iYzEQcOR^07f z-rryuhx~cCeE#VL2`;)SjhYpHas7Yd( zdHmbZg1HjPj0|su11)>_7Kea(`S4?wdj&T8&1-|0PcJNIW8??g%VIeaqeMTr<>%2H zRWo&r{Up3E!UeEv$QHh1OY?zA#krb{&lV^hDGKjXyB}4r%9Cf|M*AIDEJK5 zZHV|5hFur6ydXuS9`<~39Qk%mWlsfiaS=iRcQ*ge|9&yB;{R*caghH*zmY96^f!5n zPZAw2T)~BnNK-G5MMH8n*WWn*U;8dNr@{SLgvE9JYZNb)#^Qn=4}#FF{G zLL2F>`i^DtN-RFK5bj|)MHk%r?i%$RH(|QxO11<(|SQ62@^rFo7e`WZgSLRPJG%)Fs;fGy+=rBgI ze_7cNSHQBjp2a-ad@hvv{?V>VFnb{eIq7V*e!$ikZ17S;v`9a){jE2j!7<;T*4tdc zhcS}wznasZxw*1D1J)IXAvfaHtp0qI#60{lZ}S7Y8Kqk{KYu(3Q5WA_vyML?tiDeN zyh+?K{4{s-F>yxegg=kHMC_gQJrvjfUgYM?ju%_bs?XBxp{uZLgjA;HntZb;KIeOC zfPcWQJ6-fjeAl}_FTF}DZa$V>GzV8YyWPW3kyz9e!mi@#~HJE zO<$=xHfT^+0kizBrJ5ww*juvc^WJNSRJ37s1 z-0Z`IUH3Xt@#QNLVtuahx^FiwC`PusG)|Nq%6YmOgX3X{f;Q8TzstC($F68v?C$ja z950m00w-UZ6YCnk#QPSI8j0!25k{VFmsXk06S9oGOg)wiHNp$BVGsY_TP5HC9;q4o zn@ zab85ziauAd`zEzNPuqQ4rT&gRrg8MiAJd``g>%{EIpj2brj8U+$FF3cy49X`%%xi1 zuxXu!v@*+v1P}JBp|jGP*>UYWgKzne;=)iOljXZmyFtI(bsv6v`3NN@c6WkfoE<$zCQTThXzFDN7i`s8duD=a6M= zC)+gHW#7h1$7=0>Cn`6Xkk;=tQqG-cgdg{GhmH#vnI=}trimTr}+{>nqQ>n+4ZFz`% zu-Yx%YCn>{EnMO4eTeJ~#fFzDv+}#&x;PsVD((F3U6z53W{g+j?RHzu-nZ(MH{!U% zyCd#lUp=Azqfv3(me<$9Tx~|mI)u~y(6^Um6cbNB$DZ65hH8{!ICjDVWYmCPQH0_; z2HoP3aU)U0#U?&x9D^|>{F&luZgSnQ7O_PX;bg!+$gU$&qXOT|D4>J8%PwtCseke} z%lOT|L{d$7ciM~lt0eYMdo{WI(LgvJ;q$SLY$0`1HyK`}|uNoen_L^hH z+iJ(SV-zbqs%YIC*{&|TPledTzLM7U^~XEEvzZo^x|^)L3mwd5bO|TX0S3Zuvlm-! 
zcR@=D-4=ODn&+A#r?A!FNB=9?+DAx3o@Ubn_h2`B1ge(OTs_0#+wxGIS{stg!wPTocF&hV9!b(f}=!YWx3!1>XPpQ z_hyyZzuAi%P(9klh96;~LH#o-1s4wm`6&H?ofXqW3t?xL&y9Y0%~Lm;?9SSlM=T7v zOmtM@)TIFGfYRx);ki^gHSgR`niI}(t@zmiFDlkhe>k?^OVqlP(Pv=3&9HCF0tl*C z{mM52ib6bRTNc0cUICQNyijP|{6;CaU({~3ec_?&Y2AtweY%hy{A#7+fRgauXl4mE z;98pK7=ms1T|P##)gFW9fEP!~Lgc3v#u?xZs(r{={I%on+0-TqufV3n%eWStYr>I{ z>qltJT*QVO9_r%sPWP8?e08vgrHs8e$u&o8_BusxZkfU;BsZV`w#PO4QPA|%?_h@# zYvV+RDCD)u&zx}PeJDMqlk9XiA$xQ4J%%7T7ai$0;olToMdm5MR~yUrxR{ABl@S1s zDn=o?5w^12Z*;EPmnY`t_Mp_kI@iq)yY?r;2ZwdXf*x?rfjTo1I!AvAK1SbKv~E3r zk=a^DuXEk`&{hRIAG%?JYOz*NwZY#KklWn26@K?;#`8#ufnv_WQ#g2P5%o7pV{SRt zl6uI0=0d&CW#bNAY03PzgWfabn6@+qEy<62`Fa`L(LKt2>U4mq#*^RHifgni*C(&i zwT3y5NY3OZgzFN{k?63s;AKgeF-!X z@YTQH(?MOH-wYJ=>s1IP+1T9gS`@6@e|tLhAK9IyPY#I?b;cr6_RR55EM}Byyz1HB zYDJcW-+UF%6-NZ=`laCqMc*Tvh|x2pcgtDi;`V`qQvwOdOMHynjGN<2J zk`Pr>ZnOj3NW|Pm z_|BoSd1a}kebW#*_A_ljl@?(;Y~`+v4dJT8iWHL5UB~gxaDs6;HKqMkFh$}&uc$$o z0ZbEMpgT6c;TcEmIL$U-?^>2+%gbNkdk$$Of-;k7for&NI@0(2CQ~9GgE=K34!R`g@b6Wa z8cyF-C0WY!>qTQOb1N%$KZA+gu4!;1F2WmP&ojv%kFn;wqzIw)O?0np-#@#fCUSTW zKdm?xz&$*q((Np%$BcUtB2YyxGE(IOh^qHrlc++r@=!@I&F*?EEBv($I^pwPYn(sr zs{sehkY^1ubPpQ-JQ7N5AlDgp=-8qY20TQq%bh!(Igk)Br-PUQXhS(;c`)a5b5@%2 zKtO24o}zg7;m~BHg-KZEkIMnJ1$IUqzdNT_qKiTYz1=XFo(Z5MTB=82gXTOVC%=Ic zzED?ixJ9BdrO*ZLeO-FfLQD@Z2j_9Ik&^~aQxgcdT`m#H$=Oc7_OI{8-BIS8>&eL4 zU%z^hD{6@28tOhXL+8L!M3`RiZAQcno7fMTL(Y<&ScefGz@a>&1nx&bHvA?EgE`G>NC4jN zO+{DYN8tbuNDx#%oH&?#V3-jrfl;r>-$+TKg^-m0^AIyznnCYuzms{&=lXypfpa9- zt|(;C|7%?j+E}pWdK0iHAaDxLtsw|e^JE<{ zDh0)DQ4}5huBB9vh0LzG_nw7|I;(BZGpxz` zMt2IdB8%zk0a^8)J-{ZWXNGeO)z*e;#a01Q5CTNTY$sOB7jAwX4TkGK zUKQ(_9r1f&o88-H3N63ni@DK#AWA;`m=@7Bs36(bxa(YJ*u>sYY`wddzTytRX}k}^ ztyRcN*9W!=c(dj10FBNMMhxiAGj?wVOGr7!P47khr_e*5?hJ&$I3Vj!$D1Se2 zYb^b~ga#1eHVt4|^*HUlBlk9s)JjVVQLRq!m53&xQ?NgcT%T=Emb&-=&ORP9*=TYR z-e}b}Ts0bgB`az=H43anyqrTc`Y;eZ0v@5~T0ic)M7`@&_vMnNX zJ`b`~uejKW{S;rMhM!s+{2~mb>Qvt!Fk{JylK*162TT&3K2M>ux0{=&pX59o+Sae7 zPz#*`6%$1fSkJNzF5d)e9ZQ01XQ77{Rn~QG^q#A*@6P7+n_-;eE6>CL?fo$yLJ%3X zZ34fr^!BW7pt4>rABC`MpzVN?x4JBqKk)apc$<)@Q%Hn@3kkFkvdBC&_@JqQ3k#P` zG*Ys$L-*`%>$U)g@)rAzUKNCZi;uR=y6h_=$(ewLmUHQ)QKEi8P8fp3&yv`S$@%y#{?0;p^Y`N{vIx$@E=9%y?liA2Vnj2YaX0? 
zNSDV?ng@S!ce2YbdE}h_nkTc?mgQ$_me$Rf_Ok^>5o`qsalRn9fEy-{eRX0ZJ2U#G z5N~-#{Qb*o(}cH2Y!TP=I!B}+w8)e{UWR=BmGXW17rgj*;F5!7$m2Wy%DNQ z9a2*6^FxeF?#!;L%O?67Dfczktsr%rpiJKSFzk9+PUQTjek7ssh1NSqsern|>dGhR zR@?ge;90v&Luu;yG)v>w7EHP(d4^5R<9Za5c&0`Pc0h2Fv%RZiUK+H{JWtjiCJ)oR z?qTz1VbUL($Jtk_`mqzOU7$^ zS;@ZBg5r3JkG}umIj|GRN#92IGGT=4s=!{3hMqhgz+j00OlxPXxH*6mRysWCr-g*5 z0%Q|LyD|xQdi_dhxiAx$UclyGFHtN=jw;94tN&{;nmZn{L>H{H~p`Ofyz!^uu z>Ey#AP(N;MbI(i;4&sWnO+Q1j>T>_Lyhq^WufWgnU6~x3+jYcqMUraUA04GOg%BJDx|L2e+6kmrqw(0m z$an3+FmxUatvj*QFcpjnBF7owH~Az=i;$8Ksmlus&;$g-xsk9X(+4l3cp_o!-pxK@ z>3{}zG3a3kEuKDlb#XbiM?|&*4sX~HlBlLPh!KaNiKyNhZt($3uWSZd zh8KN6_CX+o+?=L&JwVaQJrQ!8idmtD1w}gBYsK{TM+cZX=LaDl45V1$?OL zYr_s|U#a2gGO*D*<`bV&3{d;{B-G(zEXu&-IJ@`y4{2#&5e2^+=pm$onhsm z?j&c)r(Cf44r!!^I;?BNuI#Bea`Rp8x28UFd`A|f#|Sl7hleNweRvx20JH`Td*fr` z1vM)+p8o|x)!MyUj){yw?b9?gc_ZKODm4eJyc=o4hYFFt!iJw5{6jJPQa2EU^k(PS zE5Vm6l?#^;V5I_A{nhryO%yk*{vU9QKsgQh?zmnBNZ*FHsHsu%lcMT2Fw9|XKu3tu zd~wV19~qy#R3@h#f3L`5&NR0mlgOFDk9j~bd9$2Upcr_^$F3H9Ls+~YQlaaiQ1T7< z5?+kjmnKuO2$OIj$yH)Z{5pn^E%#_z9@ZeJZG%GiwS?>>=6vNjUjwdvibI|jZM1%_ z-}nz8-$#B%?b`>;0C=az&f|HcjB_`C&3?os-e^g*K@A7}`cW4PpF=F4tr`-CGK_`k zh3_oyKb7cv@6hI9)@hbVGBln+5pTP3DARK3QBaaFv9u<_&q5oJ$wKKia6LpEuoa~* z2_gPD7t7Oy(Fcn6&+t?{d z1VR}&#ybDr87i_`2>K&ka!&;Jw3Li|jO^-YBrY;rk(^>qwg!E;$YBzxl~tmVNexju zUqM{U(XEC3tKT8(ae2L$rBGzq^Y|JFx*RHfhx~NmMPe{&I{>VrWU^WW5buRiW=xwM z&9L%tFfBY9LKOqzThaU}VT-N{_{Zt$2Ls4Brr6xy z0q=iqsIthao Date: Mon, 19 Feb 2024 17:25:28 +0900 Subject: [PATCH 14/28] precommit --- .../model_wrappers/openvino_models.py | 1 - .../visual_prompting/tasks/openvino.py | 41 ++++++++++--------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 7c0e8da2b8e..4400e6bf1bd 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -23,7 +23,6 @@ from openvino.model_api.models.types import NumericalValue, StringValue from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ResizeLongestSide -from otx.api.utils.segmentation_utils import create_hard_prediction_from_soft_prediction class ImageEncoder(ImageModel): diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index b7aaa85bc51..29342157aa3 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -310,13 +310,17 @@ def infer( self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray ) -> Tuple[List[Any], DefaultDict[Any, Any], DefaultDict[Any, Any]]: """Perform a prediction for a given input image.""" + points_score: np.ndarray + # forward image encoder images, meta = self.pre_process_image_encoder(images) original_size = np.asarray([meta["original_shape"][:2]], dtype=np.int64) image_embeddings = self.forward_image_encoder(images) # get point candidates - total_points_scores, total_bg_coords = self.forward_prompt_getter(image_embeddings, reference_feats, used_indices, original_size) + total_points_scores, total_bg_coords = self.forward_prompt_getter( + image_embeddings, reference_feats, used_indices, original_size + ) annotations: DefaultDict = defaultdict(list) predicted_masks: DefaultDict = defaultdict(list) @@ -325,8 +329,9 @@ 
def infer( points_scores = total_points_scores[label] bg_coords = total_bg_coords[label] for points_score in points_scores: - if points_score[-1] in [-1., 0.]: + if points_score[-1] in [-1.0, 0.0]: continue + x, y = points_score[:2] is_done = False for pm in predicted_masks.get(label, []): @@ -354,7 +359,7 @@ def infer( used_points[label].append(points_score) self._inspect_overlapping_areas(predicted_masks, used_points) - + for label, predictions in predicted_masks.items(): if len(predictions) == 0: continue @@ -364,13 +369,10 @@ def infer( } for prediction, used_point in zip(predictions, used_points[label]): annotation, _, _ = self.post_process( - { - self.model["decoder"].output_blob_name: prediction, - "scores": used_point[-1] - }, - metadata) + {self.model["decoder"].output_blob_name: prediction, "scores": used_point[-1]}, metadata + ) annotations[label].extend(annotation) - + return sum(annotations.values(), []), predicted_masks, used_points def forward_prompt_getter( @@ -379,13 +381,13 @@ def forward_prompt_getter( reference_feats: np.ndarray, used_indices: np.ndarray, original_size: np.ndarray, - ) -> Dict[str, np.ndarray]: + ) -> Tuple[Dict[int, np.ndarray], Dict[int, np.ndarray]]: """Forward function of OpenVINO Visual Prompting Inferencer.""" inputs = { "original_size": original_size, "threshold": np.array([[self.model["prompt_getter"].sim_threshold]], dtype=np.float32), "num_bg_points": np.array([[self.model["prompt_getter"].num_bg_points]], dtype=np.int64), - **image_embeddings + **image_embeddings, } total_points_scores: Dict[int, np.ndarray] = {} total_bg_coords: Dict[int, np.ndarray] = {} @@ -393,10 +395,10 @@ def forward_prompt_getter( reference_feat = reference_feats[label] inputs["reference_feat"] = reference_feat outputs = self.model["prompt_getter"].infer_sync(inputs) - + total_points_scores[label] = outputs["points_scores"] total_bg_coords[label] = outputs["bg_coords"] - + return total_points_scores, total_bg_coords def forward_decoder( # type: ignore @@ -642,19 +644,20 @@ def __getitem__(self, index: int) -> Dict[str, Any]: image_embeddings = self.image_encoder(images["images"]) if self.module_name == "prompt_getter": return { - "reference_feat": self.reference_feats[self.used_indices[0][0]], # only use the first feature + "reference_feat": self.reference_feats[self.used_indices[0][0]], # only use the first feature "original_size": original_size, "threshold": np.array([[self.inferencer.model["prompt_getter"].sim_threshold]], dtype=np.float32), "num_bg_points": np.array([[self.inferencer.model["prompt_getter"].num_bg_points]], dtype=np.int64), - **image_embeddings + **image_embeddings, } total_points_scores, total_bg_coords = self.inferencer.forward_prompt_getter( - image_embeddings, self.reference_feats, self.used_indices, original_size) - + image_embeddings, self.reference_feats, self.used_indices, original_size + ) + # only use the first prompt - point_score = total_points_scores[0][0] - bg_coords = total_bg_coords[0] + point_score: np.ndarray = total_points_scores[0][0] + bg_coords: np.ndarray = total_bg_coords[0] x, y = point_score[:2] point_coords = np.concatenate((np.array([[x, y]]), bg_coords), axis=0, dtype=np.float32) From a6e460d4f39f1bf23e0d1cc61d6f7f2d0c52c52a Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Mon, 19 Feb 2024 20:05:33 +0900 Subject: [PATCH 15/28] Fix unit tests --- .../zero_shot_segment_anything.py | 2 +- .../visual_prompting/tasks/openvino.py | 3 - .../model_wrappers/test_openvino_models.py | 6 +- .../test_zero_shot_segment_anything.py 
| 76 +++++++++---------- .../visual_prompting/tasks/test_inference.py | 5 +- .../visual_prompting/tasks/test_openvino.py | 25 +++--- .../visual_prompting/test_helpers.py | 2 +- 7 files changed, 56 insertions(+), 63 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index aa3206eec89..99bce5ff412 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -128,7 +128,7 @@ def _point_selection( # to handle empty tensor len_fg_coords_scores = len(fg_coords_scores) - fg_coords_scores = F.pad(fg_coords_scores, (0, 0, 0, max(0, 10 - len_fg_coords_scores)), value=-1) + fg_coords_scores = F.pad(fg_coords_scores, (0, 0, 0, max(0, 1 - len_fg_coords_scores)), value=-1) ratio = self.image_size / original_size.max() width = (original_size[1] * ratio).to(torch.int64) diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 29342157aa3..2611d488a43 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -481,7 +481,6 @@ def _inspect_overlapping_areas( self, predicted_masks: Dict[int, List[np.ndarray]], used_points: Dict[int, List[np.ndarray]], - # annotations: Dict[int, List[np.ndarray]], threshold_iou: float = 0.8, ): def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): @@ -511,12 +510,10 @@ def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): for im in sorted(list(set(overlapped_label)), reverse=True): masks.pop(im) used_points[label].pop(im) - # annotations[label].pop(im) for jm in sorted(list(set(overlapped_other_label)), reverse=True): other_masks.pop(jm) used_points[other_label].pop(jm) - # annotations[other_label].pop(jm) def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore """Perform a prediction for a given input image.""" diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index d000c818c9f..1db453d6e11 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -176,10 +176,10 @@ def test_get_inputs(self): @e2e_pytest_unit def test_postprocess(self, mocker): """Test postprocess.""" - self.decoder.output_blob_name = "masks" - self.decoder.soft_threshold = 0.5 + self.decoder.output_blob_name = "upscaled_masks" + self.decoder.mask_threshold = 0. 
self.decoder.blur_strength = 2 - fake_output = {"masks": np.ones((4, 4)), "scores": 0.1} + fake_output = {"upscaled_masks": np.ones((4, 4)), "scores": 0.1} fake_metadata = {"original_size": np.array([[6, 6]]), "label": mocker.Mock(spec=LabelEntity)} returned_value = self.decoder.postprocess(outputs=fake_output, meta=fake_metadata) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 047dcb4f8bf..3202f3b6caf 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -29,71 +29,71 @@ class TestPromptGetter: - @pytest.fixture(autouse=True) - def setup(self) -> None: - self.prompt_getter = PromptGetter(image_size=3, downsizing=1) + @pytest.fixture + def prompt_getter(self) -> PromptGetter: + return PromptGetter(image_size=4, downsizing=1) @e2e_pytest_unit - def test_set_default_thresholds(self) -> None: + def test_set_default_thresholds(self, prompt_getter) -> None: """Test set_default_thresholds.""" - assert self.prompt_getter.default_threshold_reference == 0.3 - assert self.prompt_getter.default_threshold_target == 0.65 + assert prompt_getter.default_threshold_reference == 0.3 + assert prompt_getter.default_threshold_target == 0.65 - self.prompt_getter.set_default_thresholds(default_threshold_reference=0.5, default_threshold_target=0.7) + prompt_getter.set_default_thresholds(default_threshold_reference=0.5, default_threshold_target=0.7) - assert self.prompt_getter.default_threshold_reference == 0.5 - assert self.prompt_getter.default_threshold_target == 0.7 + assert prompt_getter.default_threshold_reference == 0.5 + assert prompt_getter.default_threshold_target == 0.7 @e2e_pytest_unit @pytest.mark.parametrize( "result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])], ) - def test_forward(self, mocker, result_point_selection: torch.Tensor) -> None: + def test_forward(self, mocker, prompt_getter, result_point_selection: torch.Tensor) -> None: """Test forward.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" + ) mocker.patch.object( - self.prompt_getter, "get_prompt_candidates", return_value=(result_point_selection, torch.zeros(1, 2)) + prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2)) ) image_embeddings = torch.ones(1, 4, 4, 4) - reference_feats = torch.rand(1, 1, 4) - used_indices = torch.as_tensor([[0]]) - original_size = torch.tensor((self.prompt_getter.image_size, self.prompt_getter.image_size), dtype=torch.int64) - - total_points_scores, total_bg_coords = self.prompt_getter( - image_embeddings=image_embeddings, - reference_feats=reference_feats, - used_indices=used_indices, - original_size=original_size, + reference_feat = torch.rand(1, 4) + original_size = torch.tensor( + [[prompt_getter.image_size, prompt_getter.image_size]], dtype=torch.int64 ) - assert total_points_scores.shape[0] == 1 - assert total_bg_coords.shape[0] == 1 + points_scores, bg_coords = prompt_getter( + image_embeddings=image_embeddings, reference_feat=reference_feat, 
original_size=original_size + ) + assert torch.all(points_scores == result_point_selection) + assert torch.all(bg_coords == torch.zeros(1, 2)) + @e2e_pytest_unit @pytest.mark.parametrize( "result_point_selection", [torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]]), torch.tensor([[-1, -1, -1]])], ) - def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tensor) -> None: + def test_get_prompt_candidates(self, mocker, prompt_getter, result_point_selection: torch.Tensor) -> None: """Test get_prompt_candidates.""" - mocker.patch( - "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" - ) mocker.patch.object( - self.prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2)) + prompt_getter, "get_prompt_candidates", return_value=(result_point_selection, torch.zeros(1, 2)) ) image_embeddings = torch.ones(1, 4, 4, 4) - reference_feat = torch.rand(1, 4) - original_size = torch.tensor( - [[self.prompt_getter.image_size, self.prompt_getter.image_size]], dtype=torch.int64 - ) + reference_feats = torch.rand(1, 1, 4) + used_indices = torch.as_tensor([[0]]) + original_size = torch.tensor((prompt_getter.image_size, prompt_getter.image_size), dtype=torch.int64) - points_scores, bg_coords = self.prompt_getter.get_prompt_candidates( - image_embeddings=image_embeddings, reference_feat=reference_feat, original_size=original_size + total_points_scores, total_bg_coords = prompt_getter.get_prompt_candidates( + image_embeddings=image_embeddings, + reference_feats=reference_feats, + used_indices=used_indices, + original_size=original_size, ) - assert torch.all(points_scores == result_point_selection) - assert torch.all(bg_coords == torch.zeros(1, 2)) + assert total_points_scores.shape[0] == len(result_point_selection) + assert total_bg_coords.shape[0] == 1 @e2e_pytest_unit @pytest.mark.parametrize( @@ -106,11 +106,11 @@ def test_get_prompt_candidates(self, mocker, result_point_selection: torch.Tenso (torch.zeros(3, 3), torch.tensor([[-1, -1, -1]])), ], ) - def test_point_selection(self, mask_sim: torch.Tensor, expected: torch.Tensor) -> None: + def test_point_selection(self, prompt_getter, mask_sim: torch.Tensor, expected: torch.Tensor) -> None: """Test _point_selection.""" - points_scores, bg_coords = self.prompt_getter._point_selection( + points_scores, bg_coords = prompt_getter._point_selection( mask_sim=mask_sim, - original_size=torch.tensor([self.prompt_getter.image_size, self.prompt_getter.image_size]), + original_size=torch.tensor([prompt_getter.image_size, prompt_getter.image_size]), threshold=torch.tensor([[0.5]]), ) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index d595227fa7c..8b330131b0f 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -343,8 +343,7 @@ def test_export_to_onnx(self): }, "visual_prompting_prompt_getter": { "image_embeddings": np.random.randn(1, embed_dim, *embed_size).astype(dtype=np.float32), - "reference_feats": np.random.randn(2, 1, 256).astype(dtype=np.float32), - "used_indices": np.array([[0, 1]], dtype=np.int64), + "reference_feat": np.random.randn(1, 256).astype(dtype=np.float32), "original_size": np.random.randint(low=0, high=image_size * 2, size=(1, 2), dtype=np.int64), "threshold": np.array([[0.1]], dtype=np.float32), "num_bg_points": 
np.random.randint(low=1, high=image_size, size=(1, 1), dtype=np.int64), @@ -360,7 +359,7 @@ def test_export_to_onnx(self): } onnx_outputs = { "visual_prompting_image_encoder": ["image_embeddings"], - "visual_prompting_prompt_getter": ["total_points_scores", "total_bg_coords"], + "visual_prompting_prompt_getter": ["points_scores", "bg_coords"], "visual_prompting_decoder": ["upscaled_masks", "iou_predictions", "low_res_masks"], } diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index cdfdab44739..717e4a7d703 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -204,6 +204,7 @@ def setup(self, mocker): ) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.3 self.zero_shot_visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].output_blob_name = "upscaled_masks" @e2e_pytest_unit def test_predict(self, mocker): @@ -221,10 +222,10 @@ def test_predict(self, mocker): mocker_forward_decoder = mocker.patch.object( OpenVINOZeroShotVisualPromptingInferencer, "forward_prompt_getter", - return_value={"total_points_scores": np.array([[[1, 1, 1]]]), "total_bg_coords": np.array([[[2, 2]]])}, + return_value=({0: np.array([[1, 1, 1]])}, {0: np.array([[2, 2]])}), ) mocker_forward_decoder = mocker.patch.object( - OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value={} + OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value={"upscaled_masks": None} ) mocker_post_process = mocker.patch.object( OpenVINOZeroShotVisualPromptingInferencer, "post_process", return_value=(self.fake_annotation, None, None) @@ -421,11 +422,11 @@ def test_inspect_overlapping_areas(self) -> None: } self.zero_shot_visual_prompting_ov_inferencer._inspect_overlapping_areas( - predicted_masks, used_points, predicted_masks.copy(), threshold_iou=0.5 + predicted_masks, used_points, threshold_iou=0.5 ) - assert len(predicted_masks[0]) == 1 - assert len(predicted_masks[1]) == 2 + assert len(predicted_masks[0]) == 2 + assert len(predicted_masks[1]) == 3 assert all(np.array([2, 2, 0.5]) == used_points[0][0]) assert all(np.array([0, 0, 0.7]) == used_points[1][2]) @@ -484,9 +485,8 @@ def _load_dataloader(module_name: str, output_model: Optional[ModelEntity] = Non dataset = generate_visual_prompting_dataset() dataset = dataset.get_subset(Subset.TRAINING) return OTXZeroShotOpenVinoDataLoader( - dataset, self.mocker_inferencer, module_name, output_model=output_model + dataset, self.mocker_inferencer, module_name, output_model=output_model, reference_feats=np.zeros((1, 1, 1)), used_indices=np.array([[0]]) ) - return _load_dataloader @pytest.fixture(autouse=True) @@ -510,17 +510,14 @@ def test_getitem(self, mocker, load_dataloader, module_name: str): setattr(dataloader, "target_length", 8) mocker.patch.object( dataloader.inferencer, - "pre_process", + "pre_process_image_encoder", return_value=({"images": np.zeros((1, 3, 4, 4), dtype=np.uint8)}, {"original_shape": (4, 4)}), ) if module_name == "decoder": mocker.patch.object( - dataloader, - "prompt_getter", - return_value={ - "total_points_scores": [np.array([[0, 0, 0.5]])], - "total_bg_coords": [np.array([[1, 1]])], - }, + dataloader.inferencer, + "forward_prompt_getter", + return_value=({0: np.array([[0, 0, 0.5]])}, {0: np.array([[1, 1]])}), ) results 
= dataloader.__getitem__(0) diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index 5775c7712de..a1b077cf557 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -202,7 +202,7 @@ def set_default_thresholds(self, *args, **kwargs): pass def get_prompt_candidates(self, *args, **kwargs): - return {1: (torch.Tensor([[0, 0, 0.5]]), torch.Tensor([[1, 1]]))} + return {1: torch.Tensor([[0, 0, 0.5]])}, {1: torch.Tensor([[1, 1]])} def forward(self, *args, **kwargs): return torch.tensor([[[0, 0, 0.5], [1, 1, 0.7]]]), torch.tensor([[[2, 2]]]) From 387edff2f0a2d7abf228e566b81ad601c03c4a71 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Mon, 19 Feb 2024 20:34:28 +0900 Subject: [PATCH 16/28] Revert postprocessing --- .../models/visual_prompters/segment_anything.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index 14535b04cb2..f50afe06c8f 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -351,8 +351,6 @@ def postprocess_masks(cls, masks: Tensor, input_size: int, orig_size: Tensor) -> def get_prepadded_size(self, input_image_size: Tensor, longest_side: int) -> Tensor: """Get pre-padded size.""" - input_image_size = input_image_size.to(torch.float32) - longest_side = torch.tensor(longest_side).to(torch.float32) scale = longest_side / torch.max(input_image_size) transformed_size = scale * input_image_size return torch.floor(transformed_size + 0.5).to(torch.int64) From 182255d14163d9c60cadfc153c0a1bb3688428da Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Mon, 19 Feb 2024 20:37:25 +0900 Subject: [PATCH 17/28] precommit --- .../openvino/model_wrappers/test_openvino_models.py | 2 +- .../test_zero_shot_segment_anything.py | 10 +++------- .../algorithms/visual_prompting/tasks/test_openvino.py | 8 +++++++- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index 1db453d6e11..1b70f12aa33 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -177,7 +177,7 @@ def test_get_inputs(self): def test_postprocess(self, mocker): """Test postprocess.""" self.decoder.output_blob_name = "upscaled_masks" - self.decoder.mask_threshold = 0. 
+ self.decoder.mask_threshold = 0.0 self.decoder.blur_strength = 2 fake_output = {"upscaled_masks": np.ones((4, 4)), "scores": 0.1} fake_metadata = {"original_size": np.array([[6, 6]]), "label": mocker.Mock(spec=LabelEntity)} diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index 3202f3b6caf..a7fb4ae5d49 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -54,14 +54,10 @@ def test_forward(self, mocker, prompt_getter, result_point_selection: torch.Tens mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" ) - mocker.patch.object( - prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2)) - ) + mocker.patch.object(prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2))) image_embeddings = torch.ones(1, 4, 4, 4) reference_feat = torch.rand(1, 4) - original_size = torch.tensor( - [[prompt_getter.image_size, prompt_getter.image_size]], dtype=torch.int64 - ) + original_size = torch.tensor([[prompt_getter.image_size, prompt_getter.image_size]], dtype=torch.int64) points_scores, bg_coords = prompt_getter( image_embeddings=image_embeddings, reference_feat=reference_feat, original_size=original_size @@ -69,7 +65,7 @@ def test_forward(self, mocker, prompt_getter, result_point_selection: torch.Tens assert torch.all(points_scores == result_point_selection) assert torch.all(bg_coords == torch.zeros(1, 2)) - + @e2e_pytest_unit @pytest.mark.parametrize( "result_point_selection", diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 717e4a7d703..78850916564 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -485,8 +485,14 @@ def _load_dataloader(module_name: str, output_model: Optional[ModelEntity] = Non dataset = generate_visual_prompting_dataset() dataset = dataset.get_subset(Subset.TRAINING) return OTXZeroShotOpenVinoDataLoader( - dataset, self.mocker_inferencer, module_name, output_model=output_model, reference_feats=np.zeros((1, 1, 1)), used_indices=np.array([[0]]) + dataset, + self.mocker_inferencer, + module_name, + output_model=output_model, + reference_feats=np.zeros((1, 1, 1)), + used_indices=np.array([[0]]), ) + return _load_dataloader @pytest.fixture(autouse=True) From e17179265eba392c12a887c067ff780e2f7f3141 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 20 Feb 2024 15:39:41 +0900 Subject: [PATCH 18/28] Update unit tests --- .../model_wrappers/openvino_models.py | 1 + .../zero_shot_segment_anything.py | 9 +- .../configs/base/configuration.py | 9 + .../visual_prompting/tasks/openvino.py | 221 ++++++++++++++++-- .../visual_prompting/tasks/test_openvino.py | 135 ++++++++++- .../visual_prompting/test_helpers.py | 5 +- 6 files changed, 354 insertions(+), 26 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py 
b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 4400e6bf1bd..22dee61a7c9 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -71,6 +71,7 @@ def parameters(cls) -> Dict[str, Any]: # noqa: D102 parameters.update({"image_size": NumericalValue(value_type=int, default_value=1024, min=0, max=2048)}) parameters.update({"sim_threshold": NumericalValue(value_type=float, default_value=0.5, min=0, max=1)}) parameters.update({"num_bg_points": NumericalValue(value_type=int, default_value=1, min=0, max=1024)}) + parameters.update({"default_threshold_reference": NumericalValue(value_type=float, default_value=0.3, min=-1., max=1.)}) return parameters def _get_inputs(self): diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 99bce5ff412..37223101ef2 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -8,7 +8,7 @@ import pickle from collections import OrderedDict, defaultdict from copy import deepcopy -from datetime import datetime +import time from itertools import product from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union @@ -505,7 +505,8 @@ def _predict_masks( masks: Tensor logits: Tensor scores: Tensor - for i in range(3): + num_iter = 3 if is_cascade else 1 + for i in range(num_iter): if i == 0: # First-step prediction mask_input = torch.zeros(1, 1, *map(lambda x: x * 4, image_embeddings.shape[2:]), device=self.device) @@ -747,7 +748,7 @@ def training_epoch_end(self, outputs) -> None: self.reference_info["used_indices"].unique().unsqueeze(0), requires_grad=False ) if self.config.model.save_outputs: - path_reference_info = self.path_reference_info.format(datetime.now().strftime("%Y%m%d-%H%M%S")) + path_reference_info = self.path_reference_info.format(time.strftime("%Y%m%d-%H%M%S")) os.makedirs(os.path.dirname(path_reference_info), exist_ok=True) torch.save(self.reference_info, path_reference_info) pickle.dump( @@ -758,4 +759,4 @@ def training_epoch_end(self, outputs) -> None: repr(self.trainer.datamodule.train_dataset.dataset), open(path_reference_info.replace("reference_info.pt", "reference_meta.json"), "w"), ) - logger.info(f"Saved reference info at {path_reference_info}") + logger.info(f"Saved reference info at {path_reference_info}.") diff --git a/src/otx/algorithms/visual_prompting/configs/base/configuration.py b/src/otx/algorithms/visual_prompting/configs/base/configuration.py index d7383c28c69..61388696a09 100644 --- a/src/otx/algorithms/visual_prompting/configs/base/configuration.py +++ b/src/otx/algorithms/visual_prompting/configs/base/configuration.py @@ -131,6 +131,15 @@ class __Postprocessing(ParameterGroup): max_value=1024, affects_outcome_of=ModelLifecycle.INFERENCE, ) + + default_threshold_reference = configurable_float( + default_value=0.3, + header="Default reference threshold", + description="The threshold to get target area in the mask for reference features.", + min_value=-1.0, + max_value=1.0, + affects_outcome_of=ModelLifecycle.INFERENCE, + ) @attrs class 
__POTParameter(BaseConfig.BasePOTParameter): diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 2611d488a43..988043f1600 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -14,8 +14,10 @@ # See the License for the specific language governing permissions # and limitations under the License. +import cv2 import io import json +from copy import deepcopy import os import pickle import random @@ -26,6 +28,7 @@ from pathlib import Path from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Type, Union from zipfile import ZipFile +from otx.api.entities.scored_label import ScoredLabel import attr import nncf @@ -255,7 +258,7 @@ def __init__( **attr.asdict( hparams.postprocessing, filter=lambda attr, value: attr.name - in ["image_size", "sim_threshold", "num_bg_points", "embedded_processing"], + in ["image_size", "sim_threshold", "num_bg_points", "embedded_processing", "default_threshold_reference"], ) }, "decoder": { @@ -294,8 +297,8 @@ def __init__( self.point_labels_box = np.array([[2, 3]], dtype=np.float32) self.has_mask_inputs = [np.array([[0.0]]), np.array([[1.0]])] - self.reference_feats = None - self.used_indices = None + self.reference_feats: np.ndarray = None + self.used_indices: np.ndarray = None def pre_process_image_encoder( self, inputs: np.ndarray, extra_processing: bool = False @@ -303,11 +306,73 @@ def pre_process_image_encoder( """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for image encoder.""" return self.model["image_encoder"].preprocess(inputs, extra_processing) - def learn(self, images: np.ndarray): - """Learn.""" + def learn( + self, + dataset_item: DatasetItemEntity, + reset_feat: bool = False, + use_bbox: bool = False, + use_point: bool = False, + path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle", + ) -> Tuple[Dict[int, np.ndarray], np.ndarray]: + """Learn for reference features.""" + ref_masks: np.ndarray + self.reference_feats: np.ndarray + self.used_indices: np.ndarray + + if reset_feat or self.reference_feats is None: + self.initialize_reference_info() + + images, meta, prompts = self.pre_process(dataset_item, use_bbox, use_point) + largest_label: int = max([int(p["label"].id) for p in prompts]) + self.expand_reference_info(largest_label) + + image_embeddings = self.forward_image_encoder(images) + processed_embedding = image_embeddings["image_embeddings"].squeeze().transpose(1, 2, 0) + original_size = meta["original_shape"][:2] + + ref_masks = np.zeros((largest_label + 1, *map(int, original_size)), dtype=np.uint8) + for prompt in prompts: + if "point_coords" in prompt: + # bboxes and points + label = prompt.pop("label") + original_size = prompt.get("orig_size") + prompt.update(image_embeddings) + + prediction = self.forward_decoder(prompt, original_size, is_cascade=False) + ref_mask = prediction["upscaled_masks"] + else: + logger.warning("annotation and polygon will be supported.") + continue + ref_masks[int(label.id)] += ref_mask + + ref_masks = np.clip(ref_masks, 0, 1) + for label in range(largest_label+1): + ref_mask = ref_masks[label] + if ref_mask.sum() == 0: + # empty prediction + continue + + ref_feat = None + default_threshold_reference = deepcopy(self.model["prompt_getter"].default_threshold_reference) + while ref_feat is None: + logger.info(f"[*] default_threshold_reference : {default_threshold_reference:.4f}") + ref_feat = 
self._generate_masked_features( + processed_embedding, ref_masks[label], default_threshold_reference + ) + default_threshold_reference -= 0.05 + + self.reference_feats[label] = ref_feat + self.used_indices = np.concatenate((self.used_indices, np.array([[label]])), axis=1) + + reference_info = {"reference_feats": self.reference_feats, "used_indices": self.used_indices} + path_reference_info = path_reference_info.format(time.strftime("%Y%m%d-%H%M%S")) + logger.info(f"Saved reference info at {path_reference_info}.") + pickle.dump(reference_info, open(path_reference_info, "wb")) + return reference_info, ref_masks + def infer( - self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray + self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray, is_cascade: bool = False, ) -> Tuple[List[Any], DefaultDict[Any, Any], DefaultDict[Any, Any]]: """Perform a prediction for a given input image.""" points_score: np.ndarray @@ -352,7 +417,7 @@ def infer( } inputs_decoder.update(image_embeddings) - prediction = self.forward_decoder(inputs_decoder, original_size) + prediction = self.forward_decoder(inputs_decoder, original_size, is_cascade) prediction.update({"scores": points_score[-1]}) predicted_masks[label].append(prediction[self.model["decoder"].output_blob_name]) @@ -402,13 +467,14 @@ def forward_prompt_getter( return total_points_scores, total_bg_coords def forward_decoder( # type: ignore - self, inputs: Dict[str, np.ndarray], original_size: np.ndarray + self, inputs: Dict[str, np.ndarray], original_size: np.ndarray, is_cascade: bool = True, ) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" masks: np.ndarray logits: np.ndarray scores: np.ndarray - for i in range(3): + num_iter = 3 if is_cascade else 1 + for i in range(num_iter): if i == 0: # First-step prediction mask_input = np.zeros( @@ -416,7 +482,7 @@ def forward_decoder( # type: ignore ) has_mask_input = self.has_mask_inputs[0] - elif i == 1: + elif is_cascade and i == 1: # Cascaded Post-refinement-1 mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) # noqa: F821 if masks.sum() == 0: @@ -424,7 +490,7 @@ def forward_decoder( # type: ignore has_mask_input = self.has_mask_inputs[1] - elif i == 2: + elif is_cascade and i == 2: # Cascaded Post-refinement-2 mask_input, masks = self._postprocess_masks(masks, logits, scores) # noqa: F821 if masks.sum() == 0: @@ -517,27 +583,86 @@ def _calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore """Perform a prediction for a given input image.""" - if self.reference_feats is None and self.used_indices is None: - self.reference_feats, self.used_indices = self._get_reference_info() results = self.infer(dataset_item.numpy, self.reference_feats, self.used_indices) return results[0] - def _find_latest_reference_info(self) -> Union[str, None]: + def _find_latest_reference_info(self, root: str = "vpm_zsl_reference_infos") -> Union[str, None]: """Find latest reference info to be used.""" - if len(stamps := sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)) > 0: + if len(stamps := sorted(os.listdir(root), reverse=True)) > 0: return stamps[0] return None def _get_reference_info( - self, path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle" + self, root: str = "vpm_zsl_reference_infos", path_reference_info: str = "{}/reference_info.pickle" ) -> Union[Tuple[np.ndarray, np.ndarray], 
None]: """Get reference info through loading previously saved one or running `learn`.""" - if (latest_stamp := self._find_latest_reference_info()) is not None: + if (latest_stamp := self._find_latest_reference_info(root)) is not None: # load previously saved reference info - latest_reference_info = path_reference_info.format(latest_stamp) + latest_reference_info = os.path.join(root, path_reference_info.format(latest_stamp)) reference_info = pickle.load(open(latest_reference_info, "rb")) return reference_info["reference_feats"], reference_info["used_indices"] return None, None + + def initialize_reference_info(self) -> None: + """Initialize reference information.""" + self.reference_feats: np.ndarray = np.zeros((0, 1, 256), dtype=np.float32) + self.used_indices: np.ndarray = np.array([[]], dtype=np.int64) + + def expand_reference_info(self, new_largest_label: int) -> None: + """Expand reference info dimensions if newly given processed prompts have more lables.""" + if new_largest_label > (cur_largest_label := len(self.reference_feats) - 1): + diff = new_largest_label - cur_largest_label + self.reference_feats = np.pad(self.reference_feats, ((0, diff), (0, 0), (0, 0)), constant_values=0.0) + + def _generate_masked_features( + self, + feats: np.ndarray, + masks: np.ndarray, + threshold_mask: float, + ) -> Tuple[np.ndarray, ...]: + """Generate masked features. + + Args: + feats (np.ndarray): Raw reference features. It will be filtered with masks. + masks (np.ndarray): Reference masks used to filter features. + threshold_mask (float): Threshold to control masked region. + + Returns: + (np.ndarray): Masked features. + """ + target_shape = self.model["image_encoder"].image_size / max(masks.shape) * np.array(masks.shape) + target_shape = target_shape[::-1].astype(np.int32) + + # Post-process masks + masks = cv2.resize(masks, target_shape, interpolation=cv2.INTER_LINEAR) + masks = self._pad_to_square(masks) + masks = cv2.resize(masks, feats.shape[:2][::-1], interpolation=cv2.INTER_LINEAR) + + # Target feature extraction + if (masks > threshold_mask).sum() == 0: + # (for stability) there is no area to be extracted + return None + + masked_feat = feats[masks > threshold_mask] + masked_feat = masked_feat.mean(0)[None] + masked_feat = masked_feat / np.linalg.norm(masked_feat, axis=-1, keepdims=True) + + return masked_feat + + def _pad_to_square(self, x: np.ndarray) -> np.ndarray: + """Pad to a square input. + + Args: + x (np.ndarray): Mask to be padded. + + Returns: + (np.ndarray): Padded mask. + """ + h, w = x.shape[-2:] + padh = self.model["image_encoder"].image_size - h + padw = self.model["image_encoder"].image_size - w + x = np.pad(x, ((0, padh), (0, padw)), constant_values=0.) + return x class OTXOpenVinoDataLoader: @@ -943,6 +1068,66 @@ def load_inferencer(self) -> OpenVINOZeroShotVisualPromptingInferencer: }, num_requests=get_default_async_reqs_num(), ) + + def infer( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle" + ) -> DatasetEntity: + """Infer function of OpenVINOVisualPromptingTask. + + Currently, asynchronous execution is not supported, synchronous execution will be executed instead. 
+ """ + if inference_parameters is not None: + update_progress_callback = inference_parameters.update_progress + enable_async_inference = inference_parameters.enable_async_inference + else: + update_progress_callback = default_progress_callback + enable_async_inference = True + + # FIXME (sungchul): Support async inference. + if enable_async_inference: + logger.warning("Asynchronous inference doesn't work, synchronous inference will be executed.") + enable_async_inference = False + predicted_validation_dataset = dataset.with_empty_annotations() + + def add_prediction(id: int, annotations: List[Annotation]): + dataset_item = predicted_validation_dataset[id] + dataset_item.append_annotations(annotations) + + total_time = 0.0 + dataset_size = len(dataset) + + if self.inferencer.reference_feats is None and self.inferencer.used_indices is None: + # set reference_feats and used_indices from previously saved reference_info + self.inferencer.reference_feats, self.inferencer.used_indices = self.inferencer._get_reference_info(path_reference_info) + if self.inferencer.reference_feats is None and self.inferencer.used_indices is None: + # if they are empty, stop inference and return empty dataset + logger.warning(( + "reference_feats and used_indices are empty, stop inference and return empty dataset. " + "Please run learn function first." + )) + return predicted_validation_dataset + + for i, dataset_item in enumerate(dataset, 1): + start_time = time.perf_counter() + + annotations = self.inferencer.predict(dataset_item) + add_prediction(i - 1, annotations) + + end_time = time.perf_counter() - start_time + total_time += end_time + update_progress_callback(int(i / dataset_size * 100), None) + + self.inferencer.await_all() + + self._avg_time_per_image = total_time / len(dataset) + logger.info(f"Avg time per image: {self._avg_time_per_image} secs") + logger.info(f"Total time: {total_time} secs") + logger.info("Visual Prompting OpenVINO inference completed") + + return predicted_validation_dataset def optimize( self, diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 78850916564..d548679d2c8 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -49,6 +49,7 @@ generate_visual_prompting_dataset, init_environment, ) +from tests.unit.algorithms.visual_prompting.test_helpers import MockScoredLabel class TestOpenVINOVisualPromptingInferencer: @@ -200,11 +201,54 @@ def setup(self, mocker): {"image_encoder": "", "prompt_getter": "", "decoder": ""}, ) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"] = mocker.patch( - "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", autospec=True + "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", autospec=True, ) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.3 self.zero_shot_visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].output_blob_name = "upscaled_masks" + + @e2e_pytest_unit + def test_learn(self, mocker): + """Test learn.""" + mocker_pre_process = mocker.patch.object( + OpenVINOVisualPromptingInferencer, + "pre_process", + return_value=( + torch.zeros((1, 3, 2, 2)), + {"original_shape": np.array((4, 4))}, + [ + { + "point_coords": [np.array([[[1, 1], [2, 2]]])], + "point_labels": [1, 2], + 
"label": MockScoredLabel(label=0, name="fake"), + "orig_size": (4, 4), + } + ], + ), + ) + mocker_forward_image_encoder = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, + "forward_image_encoder", + return_value={"image_embeddings": np.empty((4, 2, 2))}, + ) + mocker_generate_masked_features = mocker.patch.object(OpenVINOZeroShotVisualPromptingInferencer, "_generate_masked_features", return_value=torch.ones(1, 256)) + + self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].infer_sync.return_value = { + "upscaled_masks": np.ones((1, 4, 4, 4), dtype=np.bool), "iou_predictions": np.array([[0.9, 0.7, 0.9, 0.8]]), "low_res_masks": np.ones((1, 4, 2, 2)),} + mocker_pickle_dump = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.pickle.dump") + mocker.patch("builtins.open", return_value="Mocked data") + self.zero_shot_visual_prompting_ov_inferencer.model["prompt_getter"].default_threshold_reference = 0.3 + + fake_input = mocker.Mock(spec=DatasetItemEntity) + results = self.zero_shot_visual_prompting_ov_inferencer.learn(fake_input, reset_feat=True) + + assert results[0]["reference_feats"].shape == (1, 1, 256) + assert results[0]["used_indices"] == np.array([[0]]) + assert np.all(results[1] == np.ones((1, 4, 4))) + mocker_pre_process.assert_called_once() + mocker_forward_image_encoder.assert_called_once() + mocker_generate_masked_features.assert_called_once() + mocker_pickle_dump.assert_called_once() @e2e_pytest_unit def test_predict(self, mocker): @@ -230,16 +274,33 @@ def test_predict(self, mocker): mocker_post_process = mocker.patch.object( OpenVINOZeroShotVisualPromptingInferencer, "post_process", return_value=(self.fake_annotation, None, None) ) + self.zero_shot_visual_prompting_ov_inferencer.reference_feats = np.random.rand(1, 1, 1) + self.zero_shot_visual_prompting_ov_inferencer.used_indices = np.array([[0]]) fake_input = mocker.Mock(spec=DatasetItemEntity) - returned_value = self.zero_shot_visual_prompting_ov_inferencer.predict(fake_input) + results = self.zero_shot_visual_prompting_ov_inferencer.predict(fake_input) mocker_pre_process.assert_called_once() mocker_forward.assert_called_once() mocker_forward_decoder.assert_called_once() mocker_post_process.assert_called_once() - assert returned_value == self.fake_annotation + assert results == self.fake_annotation + @e2e_pytest_unit + def test_forward_prompt_getter(self): + """Test forward_prompt_getter.""" + self.zero_shot_visual_prompting_ov_inferencer.model["prompt_getter"].infer_sync.return_value = { + "points_scores": np.array([[1, 1, 0.5]]), "bg_coords": np.array([[0, 0]])} + + total_points_scores, total_bg_coords = self.zero_shot_visual_prompting_ov_inferencer.forward_prompt_getter( + image_embeddings={"image_embeddings": np.empty((4, 2, 2))}, + reference_feats=np.random.rand(1, 1, 1), + used_indices=np.array([[0]]), + original_size=np.array([4, 4])) + + assert np.all(total_points_scores[0] == np.array([[1, 1, 0.5]])) + assert np.all(total_bg_coords[0] == np.array([[0, 0]])) + @e2e_pytest_unit @pytest.mark.parametrize( "postprocess_output,infer_sync_output,expected", @@ -429,6 +490,74 @@ def test_inspect_overlapping_areas(self) -> None: assert len(predicted_masks[1]) == 3 assert all(np.array([2, 2, 0.5]) == used_points[0][0]) assert all(np.array([0, 0, 0.7]) == used_points[1][2]) + + @e2e_pytest_unit + def test_find_latest_reference_info(self, mocker): + """Test _find_latest_reference_info.""" + # there are some saved reference info + 
mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=["1", "2"]) + results = self.zero_shot_visual_prompting_ov_inferencer._find_latest_reference_info() + assert results == "2" + + # there are no saved reference info + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=[]) + results = self.zero_shot_visual_prompting_ov_inferencer._find_latest_reference_info() + assert results is None + + @e2e_pytest_unit + def test_get_reference_info(self, mocker): + """Test _get_reference_info.""" + # get previously saved reference info + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=["1", "2"]) + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.pickle.load", return_value={"reference_feats": 1, "used_indices": 2}) + mocker.patch("builtins.open", return_value="Mocked data") + + results = self.zero_shot_visual_prompting_ov_inferencer._get_reference_info() + assert results == (1, 2) + + # no saved reference info + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=[]) + + results = self.zero_shot_visual_prompting_ov_inferencer._get_reference_info() + assert results == (None, None) + + @e2e_pytest_unit + def test_expand_reference_info(self): + """Test expand_reference_info.""" + self.zero_shot_visual_prompting_ov_inferencer.reference_feats = np.ones((3, 2, 2)) + new_largest_label = 5 + + self.zero_shot_visual_prompting_ov_inferencer.expand_reference_info(new_largest_label) + + assert self.zero_shot_visual_prompting_ov_inferencer.reference_feats.shape == (6, 2, 2) + assert np.all(self.zero_shot_visual_prompting_ov_inferencer.reference_feats[:3] == 1.0) + assert np.all(self.zero_shot_visual_prompting_ov_inferencer.reference_feats[3:] == 0.0) + + @e2e_pytest_unit + def test_generate_masked_features(self) -> None: + """Test _generate_masked_features.""" + self.zero_shot_visual_prompting_ov_inferencer.model["image_encoder"].image_size = 16 + feats = np.random.rand(8, 8, 1) + masks = np.zeros((16, 16), dtype=np.float32) + masks[4:12, 4:12] = 1.0 + + masked_feat = self.zero_shot_visual_prompting_ov_inferencer._generate_masked_features( + feats=feats, masks=masks, threshold_mask=0.3) + + assert masked_feat.shape == (1, 1) + + @e2e_pytest_unit + def test_pad_to_square(self) -> None: + """Test _pad_to_square.""" + self.zero_shot_visual_prompting_ov_inferencer.model["image_encoder"].image_size = 16 + + result = self.zero_shot_visual_prompting_ov_inferencer._pad_to_square(x=np.ones((8, 8))) + + assert result[:8, :8].sum() == 8**2 + assert result[:8, 8:].sum() == 0 + assert result[8:, :8].sum() == 0 + assert result[8:, 8:].sum() == 0 + class TestOTXOpenVinoDataLoader: diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index a1b077cf557..5d79549c670 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -184,10 +184,13 @@ def predict_mask(self, *args, **kwargs): class MockScoredLabel: - def __init__(self, label: int, name: str = "background"): + def __init__(self, label: int, name: str = "background", probability: float = 0., label_source = None,): self.name = name self.label = Mock() self.label.id_ = label + self.label.id = label + self.probability = probability + self.label_source = label_source self.__class__ = ScoredLabel From 43bc6c2569c7f60a528e6eca1750221bd271ece5 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" 
Date: Tue, 20 Feb 2024 15:42:17 +0900 Subject: [PATCH 19/28] precommit --- .../model_wrappers/openvino_models.py | 4 +- .../zero_shot_segment_anything.py | 2 +- .../configs/base/configuration.py | 2 +- .../visual_prompting/tasks/openvino.py | 79 +++++++++++-------- .../visual_prompting/tasks/test_openvino.py | 58 ++++++++------ .../visual_prompting/test_helpers.py | 8 +- 6 files changed, 93 insertions(+), 60 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index 22dee61a7c9..3283026252b 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -71,7 +71,9 @@ def parameters(cls) -> Dict[str, Any]: # noqa: D102 parameters.update({"image_size": NumericalValue(value_type=int, default_value=1024, min=0, max=2048)}) parameters.update({"sim_threshold": NumericalValue(value_type=float, default_value=0.5, min=0, max=1)}) parameters.update({"num_bg_points": NumericalValue(value_type=int, default_value=1, min=0, max=1024)}) - parameters.update({"default_threshold_reference": NumericalValue(value_type=float, default_value=0.3, min=-1., max=1.)}) + parameters.update( + {"default_threshold_reference": NumericalValue(value_type=float, default_value=0.3, min=-1.0, max=1.0)} + ) return parameters def _get_inputs(self): diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index 37223101ef2..b54f1180b49 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -6,9 +6,9 @@ import json import os import pickle +import time from collections import OrderedDict, defaultdict from copy import deepcopy -import time from itertools import product from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union diff --git a/src/otx/algorithms/visual_prompting/configs/base/configuration.py b/src/otx/algorithms/visual_prompting/configs/base/configuration.py index 61388696a09..2961fa8a837 100644 --- a/src/otx/algorithms/visual_prompting/configs/base/configuration.py +++ b/src/otx/algorithms/visual_prompting/configs/base/configuration.py @@ -131,7 +131,7 @@ class __Postprocessing(ParameterGroup): max_value=1024, affects_outcome_of=ModelLifecycle.INFERENCE, ) - + default_threshold_reference = configurable_float( default_value=0.3, header="Default reference threshold", diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 988043f1600..bbffbe9ce22 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -14,23 +14,22 @@ # See the License for the specific language governing permissions # and limitations under the License. 
-import cv2 import io import json -from copy import deepcopy import os import pickle import random import tempfile import time from collections import defaultdict +from copy import deepcopy from itertools import product from pathlib import Path from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Type, Union from zipfile import ZipFile -from otx.api.entities.scored_label import ScoredLabel import attr +import cv2 import nncf import numpy as np import openvino.runtime as ov @@ -258,7 +257,13 @@ def __init__( **attr.asdict( hparams.postprocessing, filter=lambda attr, value: attr.name - in ["image_size", "sim_threshold", "num_bg_points", "embedded_processing", "default_threshold_reference"], + in [ + "image_size", + "sim_threshold", + "num_bg_points", + "embedded_processing", + "default_threshold_reference", + ], ) }, "decoder": { @@ -297,8 +302,8 @@ def __init__( self.point_labels_box = np.array([[2, 3]], dtype=np.float32) self.has_mask_inputs = [np.array([[0.0]]), np.array([[1.0]])] - self.reference_feats: np.ndarray = None - self.used_indices: np.ndarray = None + self.reference_feats: Optional[np.ndarray] = None + self.used_indices: Optional[np.ndarray] = None def pre_process_image_encoder( self, inputs: np.ndarray, extra_processing: bool = False @@ -313,23 +318,21 @@ def learn( use_bbox: bool = False, use_point: bool = False, path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle", - ) -> Tuple[Dict[int, np.ndarray], np.ndarray]: + ) -> Tuple[Dict[str, np.ndarray], np.ndarray]: """Learn for reference features.""" ref_masks: np.ndarray - self.reference_feats: np.ndarray - self.used_indices: np.ndarray if reset_feat or self.reference_feats is None: self.initialize_reference_info() - + images, meta, prompts = self.pre_process(dataset_item, use_bbox, use_point) largest_label: int = max([int(p["label"].id) for p in prompts]) self.expand_reference_info(largest_label) - + image_embeddings = self.forward_image_encoder(images) processed_embedding = image_embeddings["image_embeddings"].squeeze().transpose(1, 2, 0) original_size = meta["original_shape"][:2] - + ref_masks = np.zeros((largest_label + 1, *map(int, original_size)), dtype=np.uint8) for prompt in prompts: if "point_coords" in prompt: @@ -337,16 +340,16 @@ def learn( label = prompt.pop("label") original_size = prompt.get("orig_size") prompt.update(image_embeddings) - + prediction = self.forward_decoder(prompt, original_size, is_cascade=False) ref_mask = prediction["upscaled_masks"] else: logger.warning("annotation and polygon will be supported.") continue ref_masks[int(label.id)] += ref_mask - + ref_masks = np.clip(ref_masks, 0, 1) - for label in range(largest_label+1): + for label in range(largest_label + 1): ref_mask = ref_masks[label] if ref_mask.sum() == 0: # empty prediction @@ -363,16 +366,19 @@ def learn( self.reference_feats[label] = ref_feat self.used_indices = np.concatenate((self.used_indices, np.array([[label]])), axis=1) - + reference_info = {"reference_feats": self.reference_feats, "used_indices": self.used_indices} path_reference_info = path_reference_info.format(time.strftime("%Y%m%d-%H%M%S")) logger.info(f"Saved reference info at {path_reference_info}.") pickle.dump(reference_info, open(path_reference_info, "wb")) return reference_info, ref_masks - def infer( - self, images: np.ndarray, reference_feats: np.ndarray, used_indices: np.ndarray, is_cascade: bool = False, + self, + images: np.ndarray, + reference_feats: np.ndarray, + used_indices: np.ndarray, + is_cascade: bool = False, ) -> 
Tuple[List[Any], DefaultDict[Any, Any], DefaultDict[Any, Any]]: """Perform a prediction for a given input image.""" points_score: np.ndarray @@ -467,7 +473,10 @@ def forward_prompt_getter( return total_points_scores, total_bg_coords def forward_decoder( # type: ignore - self, inputs: Dict[str, np.ndarray], original_size: np.ndarray, is_cascade: bool = True, + self, + inputs: Dict[str, np.ndarray], + original_size: np.ndarray, + is_cascade: bool = True, ) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" masks: np.ndarray @@ -602,18 +611,18 @@ def _get_reference_info( reference_info = pickle.load(open(latest_reference_info, "rb")) return reference_info["reference_feats"], reference_info["used_indices"] return None, None - + def initialize_reference_info(self) -> None: """Initialize reference information.""" - self.reference_feats: np.ndarray = np.zeros((0, 1, 256), dtype=np.float32) - self.used_indices: np.ndarray = np.array([[]], dtype=np.int64) + self.reference_feats = np.zeros((0, 1, 256), dtype=np.float32) + self.used_indices = np.array([[]], dtype=np.int64) def expand_reference_info(self, new_largest_label: int) -> None: """Expand reference info dimensions if newly given processed prompts have more lables.""" if new_largest_label > (cur_largest_label := len(self.reference_feats) - 1): diff = new_largest_label - cur_largest_label self.reference_feats = np.pad(self.reference_feats, ((0, diff), (0, 0), (0, 0)), constant_values=0.0) - + def _generate_masked_features( self, feats: np.ndarray, @@ -648,7 +657,7 @@ def _generate_masked_features( masked_feat = masked_feat / np.linalg.norm(masked_feat, axis=-1, keepdims=True) return masked_feat - + def _pad_to_square(self, x: np.ndarray) -> np.ndarray: """Pad to a square input. @@ -661,7 +670,7 @@ def _pad_to_square(self, x: np.ndarray) -> np.ndarray: h, w = x.shape[-2:] padh = self.model["image_encoder"].image_size - h padw = self.model["image_encoder"].image_size - w - x = np.pad(x, ((0, padh), (0, padw)), constant_values=0.) + x = np.pad(x, ((0, padh), (0, padw)), constant_values=0.0) return x @@ -1068,12 +1077,12 @@ def load_inferencer(self) -> OpenVINOZeroShotVisualPromptingInferencer: }, num_requests=get_default_async_reqs_num(), ) - + def infer( self, dataset: DatasetEntity, inference_parameters: Optional[InferenceParameters] = None, - path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle" + path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle", ) -> DatasetEntity: """Infer function of OpenVINOVisualPromptingTask. @@ -1098,16 +1107,20 @@ def add_prediction(id: int, annotations: List[Annotation]): total_time = 0.0 dataset_size = len(dataset) - + if self.inferencer.reference_feats is None and self.inferencer.used_indices is None: # set reference_feats and used_indices from previously saved reference_info - self.inferencer.reference_feats, self.inferencer.used_indices = self.inferencer._get_reference_info(path_reference_info) + self.inferencer.reference_feats, self.inferencer.used_indices = self.inferencer._get_reference_info( + path_reference_info + ) if self.inferencer.reference_feats is None and self.inferencer.used_indices is None: # if they are empty, stop inference and return empty dataset - logger.warning(( - "reference_feats and used_indices are empty, stop inference and return empty dataset. " - "Please run learn function first." - )) + logger.warning( + ( + "reference_feats and used_indices are empty, stop inference and return empty dataset. 
" + "Please run learn function first." + ) + ) return predicted_validation_dataset for i, dataset_item in enumerate(dataset, 1): diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index d548679d2c8..6aee906b6be 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -201,12 +201,13 @@ def setup(self, mocker): {"image_encoder": "", "prompt_getter": "", "decoder": ""}, ) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"] = mocker.patch( - "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", autospec=True, + "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", + autospec=True, ) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.3 self.zero_shot_visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].output_blob_name = "upscaled_masks" - + @e2e_pytest_unit def test_learn(self, mocker): """Test learn.""" @@ -231,14 +232,19 @@ def test_learn(self, mocker): "forward_image_encoder", return_value={"image_embeddings": np.empty((4, 2, 2))}, ) - mocker_generate_masked_features = mocker.patch.object(OpenVINOZeroShotVisualPromptingInferencer, "_generate_masked_features", return_value=torch.ones(1, 256)) + mocker_generate_masked_features = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, "_generate_masked_features", return_value=torch.ones(1, 256) + ) self.zero_shot_visual_prompting_ov_inferencer.model["decoder"].infer_sync.return_value = { - "upscaled_masks": np.ones((1, 4, 4, 4), dtype=np.bool), "iou_predictions": np.array([[0.9, 0.7, 0.9, 0.8]]), "low_res_masks": np.ones((1, 4, 2, 2)),} + "upscaled_masks": np.ones((1, 4, 4, 4), dtype=np.bool), + "iou_predictions": np.array([[0.9, 0.7, 0.9, 0.8]]), + "low_res_masks": np.ones((1, 4, 2, 2)), + } mocker_pickle_dump = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.pickle.dump") mocker.patch("builtins.open", return_value="Mocked data") self.zero_shot_visual_prompting_ov_inferencer.model["prompt_getter"].default_threshold_reference = 0.3 - + fake_input = mocker.Mock(spec=DatasetItemEntity) results = self.zero_shot_visual_prompting_ov_inferencer.learn(fake_input, reset_feat=True) @@ -290,17 +296,20 @@ def test_predict(self, mocker): def test_forward_prompt_getter(self): """Test forward_prompt_getter.""" self.zero_shot_visual_prompting_ov_inferencer.model["prompt_getter"].infer_sync.return_value = { - "points_scores": np.array([[1, 1, 0.5]]), "bg_coords": np.array([[0, 0]])} - + "points_scores": np.array([[1, 1, 0.5]]), + "bg_coords": np.array([[0, 0]]), + } + total_points_scores, total_bg_coords = self.zero_shot_visual_prompting_ov_inferencer.forward_prompt_getter( image_embeddings={"image_embeddings": np.empty((4, 2, 2))}, reference_feats=np.random.rand(1, 1, 1), used_indices=np.array([[0]]), - original_size=np.array([4, 4])) - + original_size=np.array([4, 4]), + ) + assert np.all(total_points_scores[0] == np.array([[1, 1, 0.5]])) assert np.all(total_bg_coords[0] == np.array([[0, 0]])) - + @e2e_pytest_unit @pytest.mark.parametrize( "postprocess_output,infer_sync_output,expected", @@ -490,7 +499,7 @@ def test_inspect_overlapping_areas(self) -> None: assert len(predicted_masks[1]) == 3 assert all(np.array([2, 2, 0.5]) == used_points[0][0]) assert all(np.array([0, 0, 0.7]) 
== used_points[1][2]) - + @e2e_pytest_unit def test_find_latest_reference_info(self, mocker): """Test _find_latest_reference_info.""" @@ -498,29 +507,32 @@ def test_find_latest_reference_info(self, mocker): mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=["1", "2"]) results = self.zero_shot_visual_prompting_ov_inferencer._find_latest_reference_info() assert results == "2" - + # there are no saved reference info mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=[]) results = self.zero_shot_visual_prompting_ov_inferencer._find_latest_reference_info() assert results is None - + @e2e_pytest_unit def test_get_reference_info(self, mocker): """Test _get_reference_info.""" # get previously saved reference info mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=["1", "2"]) - mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.pickle.load", return_value={"reference_feats": 1, "used_indices": 2}) + mocker.patch( + "otx.algorithms.visual_prompting.tasks.openvino.pickle.load", + return_value={"reference_feats": 1, "used_indices": 2}, + ) mocker.patch("builtins.open", return_value="Mocked data") - + results = self.zero_shot_visual_prompting_ov_inferencer._get_reference_info() assert results == (1, 2) - + # no saved reference info mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=[]) - + results = self.zero_shot_visual_prompting_ov_inferencer._get_reference_info() assert results == (None, None) - + @e2e_pytest_unit def test_expand_reference_info(self): """Test expand_reference_info.""" @@ -528,11 +540,11 @@ def test_expand_reference_info(self): new_largest_label = 5 self.zero_shot_visual_prompting_ov_inferencer.expand_reference_info(new_largest_label) - + assert self.zero_shot_visual_prompting_ov_inferencer.reference_feats.shape == (6, 2, 2) assert np.all(self.zero_shot_visual_prompting_ov_inferencer.reference_feats[:3] == 1.0) assert np.all(self.zero_shot_visual_prompting_ov_inferencer.reference_feats[3:] == 0.0) - + @e2e_pytest_unit def test_generate_masked_features(self) -> None: """Test _generate_masked_features.""" @@ -542,10 +554,11 @@ def test_generate_masked_features(self) -> None: masks[4:12, 4:12] = 1.0 masked_feat = self.zero_shot_visual_prompting_ov_inferencer._generate_masked_features( - feats=feats, masks=masks, threshold_mask=0.3) + feats=feats, masks=masks, threshold_mask=0.3 + ) assert masked_feat.shape == (1, 1) - + @e2e_pytest_unit def test_pad_to_square(self) -> None: """Test _pad_to_square.""" @@ -557,7 +570,6 @@ def test_pad_to_square(self) -> None: assert result[:8, 8:].sum() == 0 assert result[8:, :8].sum() == 0 assert result[8:, 8:].sum() == 0 - class TestOTXOpenVinoDataLoader: diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index 5d79549c670..445bc0f2ba1 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -184,7 +184,13 @@ def predict_mask(self, *args, **kwargs): class MockScoredLabel: - def __init__(self, label: int, name: str = "background", probability: float = 0., label_source = None,): + def __init__( + self, + label: int, + name: str = "background", + probability: float = 0.0, + label_source=None, + ): self.name = name self.label = Mock() self.label.id_ = label From 3966f36f8e66f9f2aae0e200a9ee098f26af8f96 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 
20 Feb 2024 15:47:37 +0900 Subject: [PATCH 20/28] Fix --- src/otx/algorithms/visual_prompting/tasks/openvino.py | 7 +++++-- tests/integration/cli/visual_prompting/test_zero_shot.py | 8 ++++---- .../algorithms/visual_prompting/tasks/test_openvino.py | 4 ++++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index bbffbe9ce22..ada64d3c937 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -597,6 +597,8 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: def _find_latest_reference_info(self, root: str = "vpm_zsl_reference_infos") -> Union[str, None]: """Find latest reference info to be used.""" + if not os.path.isdir(root): + return None if len(stamps := sorted(os.listdir(root), reverse=True)) > 0: return stamps[0] return None @@ -1082,7 +1084,8 @@ def infer( self, dataset: DatasetEntity, inference_parameters: Optional[InferenceParameters] = None, - path_reference_info: str = "vpm_zsl_reference_infos/{}/reference_info.pickle", + root: str = "vpm_zsl_reference_infos", + path_reference_info: str = "{}/reference_info.pickle", ) -> DatasetEntity: """Infer function of OpenVINOVisualPromptingTask. @@ -1111,7 +1114,7 @@ def add_prediction(id: int, annotations: List[Annotation]): if self.inferencer.reference_feats is None and self.inferencer.used_indices is None: # set reference_feats and used_indices from previously saved reference_info self.inferencer.reference_feats, self.inferencer.used_indices = self.inferencer._get_reference_info( - path_reference_info + root, path_reference_info ) if self.inferencer.reference_feats is None and self.inferencer.used_indices is None: # if they are empty, stop inference and return empty dataset diff --git a/tests/integration/cli/visual_prompting/test_zero_shot.py b/tests/integration/cli/visual_prompting/test_zero_shot.py index ccedf5c2fa2..33b5b433628 100644 --- a/tests/integration/cli/visual_prompting/test_zero_shot.py +++ b/tests/integration/cli/visual_prompting/test_zero_shot.py @@ -17,10 +17,10 @@ ) args = { - "--train-data-roots": "tests/assets/car_tree_bug", - "--val-data-roots": "tests/assets/car_tree_bug", - "--test-data-roots": "tests/assets/car_tree_bug", - "--input": "tests/assets/car_tree_bug/images/train", + "--train-data-roots": "tests/assets/car_tree_bug_zero_shot", + "--val-data-roots": "tests/assets/car_tree_bug_zero_shot", + "--test-data-roots": "tests/assets/car_tree_bug_zero_shot", + "--input": "tests/assets/car_tree_bug_zero_shot/images/train", "train_params": [ "params", "--learning_parameters.trainer.max_epochs", diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 6aee906b6be..39c5fcfafd8 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -503,6 +503,8 @@ def test_inspect_overlapping_areas(self) -> None: @e2e_pytest_unit def test_find_latest_reference_info(self, mocker): """Test _find_latest_reference_info.""" + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.path.isdir", return_value=True) + # there are some saved reference info mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=["1", "2"]) results = self.zero_shot_visual_prompting_ov_inferencer._find_latest_reference_info() 
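For reference, the behaviour exercised by this hunk and the next one (both only add an os.path.isdir mock) is the reference-info lookup that this patch hardens against a missing root directory. A minimal, standalone sketch of that lookup, written here for illustration rather than taken verbatim from the OTX sources; the name mirrors _find_latest_reference_info in the diff:

    import os
    from typing import Optional

    def find_latest_reference_info(root: str = "vpm_zsl_reference_infos") -> Optional[str]:
        """Return the newest timestamp directory under root, or None if unavailable."""
        # Guard against a workspace that has never saved reference info yet.
        if not os.path.isdir(root):
            return None
        # Stamps are "%Y%m%d-%H%M%S" strings, so lexicographic order is chronological.
        stamps = sorted(os.listdir(root), reverse=True)
        return stamps[0] if stamps else None

Without the isdir guard, os.listdir would raise FileNotFoundError on a fresh workspace; the unit tests here patch os.path.isdir to return True so the existing os.listdir mocks are still reached.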
@@ -516,6 +518,8 @@ def test_find_latest_reference_info(self, mocker): @e2e_pytest_unit def test_get_reference_info(self, mocker): """Test _get_reference_info.""" + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.path.isdir", return_value=True) + # get previously saved reference info mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.os.listdir", return_value=["1", "2"]) mocker.patch( From b3448f67471fcff6e21b52de6d5b22756eef89f7 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Tue, 20 Feb 2024 17:36:13 +0900 Subject: [PATCH 21/28] Fix for unit test coverage --- .../zero_shot_segment_anything.py | 7 +- .../test_zero_shot_segment_anything.py | 85 +++++++++++++++++-- 2 files changed, 84 insertions(+), 8 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index b54f1180b49..5948f2e42bb 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -723,11 +723,12 @@ def configure_optimizers(self) -> None: """Skip configure_optimizers unused in zero-shot learning.""" pass - def _find_latest_reference_info(self) -> Union[str, None]: + def _find_latest_reference_info(self, root: str = "vpm_zsl_reference_infos") -> Union[str, None]: """Find latest reference info to be used.""" - if len(stamps := sorted(os.listdir("vpm_zsl_reference_infos"), reverse=True)) > 0: + if not os.path.isdir(root): + return None + if len(stamps := sorted(os.listdir(root), reverse=True)) > 0: return stamps[0] - self.initialize_reference_info() return None def on_train_start(self) -> None: diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index a7fb4ae5d49..5f1812adf86 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -73,9 +73,7 @@ def test_forward(self, mocker, prompt_getter, result_point_selection: torch.Tens ) def test_get_prompt_candidates(self, mocker, prompt_getter, result_point_selection: torch.Tensor) -> None: """Test get_prompt_candidates.""" - mocker.patch.object( - prompt_getter, "get_prompt_candidates", return_value=(result_point_selection, torch.zeros(1, 2)) - ) + mocker.patch.object(prompt_getter, "_point_selection", return_value=(result_point_selection, torch.zeros(1, 2))) image_embeddings = torch.ones(1, 4, 4, 4) reference_feats = torch.rand(1, 1, 4) used_indices = torch.as_tensor([[0]]) @@ -88,8 +86,8 @@ def test_get_prompt_candidates(self, mocker, prompt_getter, result_point_selecti original_size=original_size, ) - assert total_points_scores.shape[0] == len(result_point_selection) - assert total_bg_coords.shape[0] == 1 + assert total_points_scores[0].shape[0] == len(result_point_selection) + assert total_bg_coords[0].shape[0] == 1 @e2e_pytest_unit @pytest.mark.parametrize( @@ -426,3 +424,80 @@ def test_postprocess_masks( _, result = 
zero_shot_segment_anything._postprocess_masks(masks, logits, scores) assert torch.equal(result, expected) + + @e2e_pytest_unit + def test_find_latest_reference_info(self, mocker, set_zero_shot_segment_anything): + """Test _find_latest_reference_info.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.os.path.isdir", + return_value=True, + ) + + # there are some saved reference info + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.os.listdir", + return_value=["1", "2"], + ) + results = zero_shot_segment_anything._find_latest_reference_info() + assert results == "2" + + # there are no saved reference info + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.os.listdir", + return_value=[], + ) + results = zero_shot_segment_anything._find_latest_reference_info() + assert results is None + + @e2e_pytest_unit + def test_on_predict_start(self, mocker, set_zero_shot_segment_anything): + """Test on_predict_start.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.os.path.isdir", + return_value=True, + ) + + # get previously saved reference info + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.os.listdir", + return_value=["1", "2"], + ) + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.torch.load", + return_value=torch.nn.ParameterDict( + {"reference_feats": torch.zeros((1, 1, 256)), "used_indices": torch.tensor([[0.0]])} + ), + ) + mocker.patch("builtins.open", return_value="Mocked data") + + zero_shot_segment_anything.on_predict_start() + assert isinstance(zero_shot_segment_anything.reference_info, torch.nn.ParameterDict) + assert zero_shot_segment_anything.reference_info["reference_feats"].shape == (1, 1, 256) + assert zero_shot_segment_anything.reference_info["used_indices"].shape == (1, 1) + + # no saved reference info + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.os.listdir", + return_value=[], + ) + + zero_shot_segment_anything.set_empty_reference_info() + zero_shot_segment_anything.on_predict_start() + + assert zero_shot_segment_anything.reference_info["reference_feats"].shape == (0,) + assert zero_shot_segment_anything.reference_info["used_indices"].shape == (1, 0) + + @e2e_pytest_unit + def test_expand_reference_info(self, set_zero_shot_segment_anything): + """Test expand_reference_info.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything.reference_info["reference_feats"] = torch.ones((3, 2, 2)) + new_largest_label = 5 + + zero_shot_segment_anything.expand_reference_info(new_largest_label) + + assert zero_shot_segment_anything.reference_info["reference_feats"].shape == (6, 2, 2) + assert torch.all(zero_shot_segment_anything.reference_info["reference_feats"][:3] == 1.0) + assert torch.all(zero_shot_segment_anything.reference_info["reference_feats"][3:] == 0.0) From 5c6ffd27cc5088a84a660c52268fcf7471627aa7 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 22 Feb 
2024 14:59:37 +0900 Subject: [PATCH 22/28] Refactor model loading and state dict handling --- .../visual_prompters/segment_anything.py | 20 ++--- .../visual_prompters/test_segment_anything.py | 88 ++++++++++--------- 2 files changed, 50 insertions(+), 58 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index f50afe06c8f..9327f7f2b84 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -138,26 +138,13 @@ def load_checkpoint(self, state_dict: Optional[OrderedDict] = None) -> None: Args: state_dict (Optional[OrderedDict], optional): State dict of SAM. Defaults to None. """ - - def skip_unused_parameters(state_dict): - if self.config.model.backbone == "tiny_vit": - for key in [ - "image_encoder.norm_head.weight", - "image_encoder.norm_head.bias", - "image_encoder.head.weight", - "image_encoder.head.bias", - ]: - if key in state_dict: - state_dict.pop(key) - if state_dict: # state_dict from args.load_from - skip_unused_parameters(state_dict) self.load_state_dict(state_dict) elif self.config.model.checkpoint: if str(self.config.model.checkpoint).endswith(".ckpt"): # load lightning checkpoint - self.load_from_checkpoint(self.config.model.checkpoint) + self.load_from_checkpoint(self.config.model.checkpoint, strict=False) else: if str(self.config.model.checkpoint).startswith("http"): # get checkpoint from url @@ -167,8 +154,11 @@ def skip_unused_parameters(state_dict): with open(self.config.model.checkpoint, "rb") as f: state_dict = torch.load(f) - skip_unused_parameters(state_dict) self.load_state_dict(state_dict, strict=False) + else: + # use default checkpoint + state_dict = torch.hub.load_state_dict_from_url(CKPT_PATHS[self.config.model.backbone]) + self.load_state_dict(state_dict, strict=False) ########################################################## # forward for inference (export/deploy/optimize) # diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index 8f3f727e533..a1a49b0faea 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -153,37 +153,21 @@ def test_set_metrics(self, mocker, loss_type: str): @e2e_pytest_unit @pytest.mark.parametrize( - "is_backbone_arg,state_dict", + "state_dict", [ - ( - False, - OrderedDict( - [ - ("image_encoder.weight", torch.ones(4, 4)), - ("image_encoder.bias", torch.ones(4)), - ("prompt_encoder.layer.weight", Tensor([[0.0]])), - ("prompt_encoder.layer.bias", Tensor([0.0])), - ("mask_decoder.layer.weight", Tensor([[0.0]])), - ("mask_decoder.layer.bias", Tensor([0.0])), - ] - ), - ), - ( - True, - OrderedDict( - [ - ("image_encoder.weight", torch.ones(4, 4)), - ("image_encoder.bias", torch.ones(4)), - ("prompt_encoder.layer.weight", Tensor([[1.0]])), - ("prompt_encoder.layer.bias", Tensor([1.0])), - ("mask_decoder.layer.weight", Tensor([[1.0]])), - ("mask_decoder.layer.bias", Tensor([1.0])), - ] - ), + 
OrderedDict( + [ + ("image_encoder.weight", torch.ones(4, 4)), + ("image_encoder.bias", torch.ones(4)), + ("prompt_encoder.layer.weight", Tensor([[1.0]])), + ("prompt_encoder.layer.bias", Tensor([1.0])), + ("mask_decoder.layer.weight", Tensor([[1.0]])), + ("mask_decoder.layer.bias", Tensor([1.0])), + ] ), ], ) - def test_load_checkpoint_with_state_dict(self, mocker, is_backbone_arg: bool, state_dict: OrderedDict): + def test_load_checkpoint_with_state_dict(self, mocker, state_dict: OrderedDict): """Test load_checkpoint with state_dict.""" mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.freeze_networks" @@ -191,30 +175,18 @@ def test_load_checkpoint_with_state_dict(self, mocker, is_backbone_arg: bool, st mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.set_metrics" ) + mocker_load_state_dict = mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_state_dict" + ) sam = SegmentAnything(self.base_config, state_dict=state_dict) sam_state_dict = sam.state_dict() + mocker_load_state_dict.assert_called_once() for k, v in state_dict.items(): assert k in sam_state_dict assert torch.all(v == sam_state_dict[k]) - @e2e_pytest_unit - def test_load_checkpoint_without_checkpoint(self, mocker): - """Test load_checkpoint without checkpoint.""" - mocker.patch( - "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.freeze_networks" - ) - mocker.patch( - "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.set_metrics" - ) - config = self.base_config.copy() - config.model.update(dict(checkpoint=None)) - - sam = SegmentAnything(config, state_dict=None) - - assert True - @e2e_pytest_unit def test_load_checkpoint_with_url(self, mocker): """Test load_checkpoint with url.""" @@ -228,12 +200,16 @@ def test_load_checkpoint_with_url(self, mocker): mocker_load_state_dict = mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_state_dict" ) + mocker_load_from_checkpoint = mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_from_checkpoint" + ) config = self.base_config.copy() config.model.update(dict(checkpoint="http://checkpoint")) sam = SegmentAnything(config, state_dict=None) + mocker_load_from_checkpoint.assert_not_called() mocker_load_state_dict_from_url.assert_called_once() mocker_load_state_dict.assert_called_once() @@ -263,8 +239,34 @@ def test_load_checkpoint_from_local_checkpoint(self, mocker, monkeypatch, checkp if checkpoint.endswith(".ckpt"): mocker_load_from_checkpoint.assert_called_once() + mocker_load_state_dict.assert_not_called() else: + mocker_load_from_checkpoint.assert_not_called() mocker_load_state_dict.assert_called_once() + + @e2e_pytest_unit + def test_load_checkpoint_without_checkpoint(self, mocker): + """Test load_checkpoint without checkpoint.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.freeze_networks" + ) + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.set_metrics" + ) + 
mocker_load_from_checkpoint = mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_from_checkpoint" + ) + mocker_load_state_dict = mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_state_dict" + ) + mocker_load_state_dict_from_url = mocker.patch("torch.hub.load_state_dict_from_url", return_value=OrderedDict()) + + config = self.base_config.copy() + sam = SegmentAnything(config, state_dict=None) + + mocker_load_from_checkpoint.assert_not_called() + mocker_load_state_dict_from_url.assert_called_once() + mocker_load_state_dict.assert_called_once() @e2e_pytest_unit @pytest.mark.parametrize( From 539a37cbd3ec058d70e86704ba159d7aa9f53d26 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 22 Feb 2024 15:03:23 +0900 Subject: [PATCH 23/28] Remove unnecessary flag --- src/otx/algorithms/visual_prompting/tasks/openvino.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index ada64d3c937..6f50ef97940 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -491,7 +491,7 @@ def forward_decoder( # type: ignore ) has_mask_input = self.has_mask_inputs[0] - elif is_cascade and i == 1: + elif i == 1: # Cascaded Post-refinement-1 mask_input, masks = self._postprocess_masks(masks, logits, scores, is_single=True) # noqa: F821 if masks.sum() == 0: @@ -499,7 +499,7 @@ def forward_decoder( # type: ignore has_mask_input = self.has_mask_inputs[1] - elif is_cascade and i == 2: + elif i == 2: # Cascaded Post-refinement-2 mask_input, masks = self._postprocess_masks(masks, logits, scores) # noqa: F821 if masks.sum() == 0: From a01345a3c3d24f5ea9563dc49ccba568d550fdad Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 22 Feb 2024 15:11:16 +0900 Subject: [PATCH 24/28] precommit --- .../models/visual_prompters/test_segment_anything.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index a1a49b0faea..66b56f6dcbf 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -243,7 +243,7 @@ def test_load_checkpoint_from_local_checkpoint(self, mocker, monkeypatch, checkp else: mocker_load_from_checkpoint.assert_not_called() mocker_load_state_dict.assert_called_once() - + @e2e_pytest_unit def test_load_checkpoint_without_checkpoint(self, mocker): """Test load_checkpoint without checkpoint.""" From d0304275439c04bc46887bee21fb758458a8615d Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 22 Feb 2024 15:55:57 +0900 Subject: [PATCH 25/28] Fix --- .../models/visual_prompters/test_segment_anything.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index 
66b56f6dcbf..eecddf412a1 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -175,14 +175,10 @@ def test_load_checkpoint_with_state_dict(self, mocker, state_dict: OrderedDict): mocker.patch( "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.set_metrics" ) - mocker_load_state_dict = mocker.patch( - "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_state_dict" - ) sam = SegmentAnything(self.base_config, state_dict=state_dict) sam_state_dict = sam.state_dict() - mocker_load_state_dict.assert_called_once() for k, v in state_dict.items(): assert k in sam_state_dict assert torch.all(v == sam_state_dict[k]) From 9d144fd914d40cd7d3753ddd093abf34bfe62aa8 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 22 Feb 2024 16:55:16 +0900 Subject: [PATCH 26/28] Fix --- .../visual_prompting/tasks/test_openvino.py | 54 ++++++++++++++----- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index 39c5fcfafd8..bc11947b9b7 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -842,7 +842,33 @@ def setup(self, mocker, otx_model): mocker.patch.object( OpenVINOZeroShotVisualPromptingTask, "load_inferencer", return_value=visual_prompting_ov_inferencer ) - self.visual_prompting_ov_task = OpenVINOZeroShotVisualPromptingTask(task_environment=self.task_environment) + self.zero_shot_visual_prompting_ov_task = OpenVINOZeroShotVisualPromptingTask(task_environment=self.task_environment) + + @e2e_pytest_unit + def test_infer(self, mocker): + """Test infer.""" + fake_annotation = [ + Annotation( + Polygon(points=[Point(0, 0)]), + id=0, + labels=[ScoredLabel(LabelEntity(name="fake", domain="VISUALPROMPTING"), probability=1.0)], + ) + ] + + mocker_predict = mocker.patch.object(OpenVINOZeroShotVisualPromptingInferencer, "predict", return_value=fake_annotation) + mocker.patch.object(ShapeFactory, "shape_produces_valid_crop", return_value=True) + + dataset = generate_visual_prompting_dataset() + + updated_dataset = self.zero_shot_visual_prompting_ov_task.infer( + dataset, InferenceParameters(enable_async_inference=False) + ) + + for updated in updated_dataset: + assert updated.annotation_scene.contains_any([LabelEntity(name="fake", domain="VISUALPROMPTING")]) + + mocker_predict.assert_called() + assert mocker_predict.call_count == len(updated_dataset) @e2e_pytest_unit def test_optimize(self, mocker): @@ -857,43 +883,43 @@ def patch_save_model(model, output_xml): dataset = generate_visual_prompting_dataset() output_model = deepcopy(self.task_environment.model) - self.visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.xml", b"image_encoder_xml") - self.visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.bin", b"image_encoder_bin") - self.visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.xml", b"prompt_getter_xml") - self.visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.bin", b"prompt_getter_bin") - self.visual_prompting_ov_task.model.set_data("visual_prompting_decoder.xml", 
b"decoder_xml") - self.visual_prompting_ov_task.model.set_data("visual_prompting_decoder.bin", b"decoder_bin") + self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.xml", b"image_encoder_xml") + self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.bin", b"image_encoder_bin") + self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.xml", b"prompt_getter_xml") + self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.bin", b"prompt_getter_bin") + self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_decoder.xml", b"decoder_xml") + self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_decoder.bin", b"decoder_bin") mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.read_model", autospec=True) mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.serialize", new=patch_save_model) mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.compile_model") fake_quantize = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.nncf.quantize", autospec=True) - self.visual_prompting_ov_task.optimize(OptimizationType.POT, dataset=dataset, output_model=output_model) + self.zero_shot_visual_prompting_ov_task.optimize(OptimizationType.POT, dataset=dataset, output_model=output_model) fake_quantize.assert_called() assert fake_quantize.call_count == 3 assert ( - self.visual_prompting_ov_task.model.get_data("visual_prompting_image_encoder.xml") + self.zero_shot_visual_prompting_ov_task.model.get_data("visual_prompting_image_encoder.xml") == b"compressed_visual_prompting_image_encoder.xml" ) assert ( - self.visual_prompting_ov_task.model.get_data("visual_prompting_image_encoder.bin") + self.zero_shot_visual_prompting_ov_task.model.get_data("visual_prompting_image_encoder.bin") == b"compressed_visual_prompting_image_encoder.bin" ) assert ( - self.visual_prompting_ov_task.model.get_data("visual_prompting_prompt_getter.xml") + self.zero_shot_visual_prompting_ov_task.model.get_data("visual_prompting_prompt_getter.xml") == b"compressed_visual_prompting_prompt_getter.xml" ) assert ( - self.visual_prompting_ov_task.model.get_data("visual_prompting_prompt_getter.bin") + self.zero_shot_visual_prompting_ov_task.model.get_data("visual_prompting_prompt_getter.bin") == b"compressed_visual_prompting_prompt_getter.bin" ) assert ( - self.visual_prompting_ov_task.model.get_data("visual_prompting_decoder.xml") + self.zero_shot_visual_prompting_ov_task.model.get_data("visual_prompting_decoder.xml") == b"compressed_visual_prompting_decoder.xml" ) assert ( - self.visual_prompting_ov_task.model.get_data("visual_prompting_decoder.bin") + self.zero_shot_visual_prompting_ov_task.model.get_data("visual_prompting_decoder.bin") == b"compressed_visual_prompting_decoder.bin" ) From a99d3b97ec417cbb5c182d5bae134670f03c52f0 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Thu, 22 Feb 2024 17:00:45 +0900 Subject: [PATCH 27/28] Fix --- .../visual_prompting/tasks/test_openvino.py | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index bc11947b9b7..f3ba6dae44b 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -842,8 +842,10 @@ def setup(self, mocker, otx_model): 
mocker.patch.object( OpenVINOZeroShotVisualPromptingTask, "load_inferencer", return_value=visual_prompting_ov_inferencer ) - self.zero_shot_visual_prompting_ov_task = OpenVINOZeroShotVisualPromptingTask(task_environment=self.task_environment) - + self.zero_shot_visual_prompting_ov_task = OpenVINOZeroShotVisualPromptingTask( + task_environment=self.task_environment + ) + @e2e_pytest_unit def test_infer(self, mocker): """Test infer.""" @@ -855,7 +857,9 @@ def test_infer(self, mocker): ) ] - mocker_predict = mocker.patch.object(OpenVINOZeroShotVisualPromptingInferencer, "predict", return_value=fake_annotation) + mocker_predict = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, "predict", return_value=fake_annotation + ) mocker.patch.object(ShapeFactory, "shape_produces_valid_crop", return_value=True) dataset = generate_visual_prompting_dataset() @@ -883,10 +887,18 @@ def patch_save_model(model, output_xml): dataset = generate_visual_prompting_dataset() output_model = deepcopy(self.task_environment.model) - self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.xml", b"image_encoder_xml") - self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.bin", b"image_encoder_bin") - self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.xml", b"prompt_getter_xml") - self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.bin", b"prompt_getter_bin") + self.zero_shot_visual_prompting_ov_task.model.set_data( + "visual_prompting_image_encoder.xml", b"image_encoder_xml" + ) + self.zero_shot_visual_prompting_ov_task.model.set_data( + "visual_prompting_image_encoder.bin", b"image_encoder_bin" + ) + self.zero_shot_visual_prompting_ov_task.model.set_data( + "visual_prompting_prompt_getter.xml", b"prompt_getter_xml" + ) + self.zero_shot_visual_prompting_ov_task.model.set_data( + "visual_prompting_prompt_getter.bin", b"prompt_getter_bin" + ) self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_decoder.xml", b"decoder_xml") self.zero_shot_visual_prompting_ov_task.model.set_data("visual_prompting_decoder.bin", b"decoder_bin") mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.read_model", autospec=True) @@ -894,7 +906,9 @@ def patch_save_model(model, output_xml): mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.compile_model") fake_quantize = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.nncf.quantize", autospec=True) - self.zero_shot_visual_prompting_ov_task.optimize(OptimizationType.POT, dataset=dataset, output_model=output_model) + self.zero_shot_visual_prompting_ov_task.optimize( + OptimizationType.POT, dataset=dataset, output_model=output_model + ) fake_quantize.assert_called() assert fake_quantize.call_count == 3 From 826408f1f7e09140a65345c66ffaa6f2bcaae5ef Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Fri, 23 Feb 2024 13:43:03 +0900 Subject: [PATCH 28/28] Fix unittest --- .../visual_prompting/tasks/test_openvino.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index e231e1c7b88..8dab6141cab 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -847,8 +847,20 @@ def setup(self, mocker, otx_model): ) @e2e_pytest_unit - def 
test_infer(self, mocker): - """Test infer.""" + def test_infer_without_reference_info(self): + """Test infer without reference_info.""" + dataset = generate_visual_prompting_dataset() + + updated_dataset = self.zero_shot_visual_prompting_ov_task.infer( + dataset, InferenceParameters(enable_async_inference=False) + ) + + for updated in updated_dataset: + assert len(updated.annotation_scene.annotations) == 0 + + @e2e_pytest_unit + def test_infer_with_reference_info(self, mocker): + """Test infer with reference_info.""" fake_annotation = [ Annotation( Polygon(points=[Point(0, 0)]), @@ -861,6 +873,9 @@ def test_infer(self, mocker): OpenVINOZeroShotVisualPromptingInferencer, "predict", return_value=fake_annotation ) mocker.patch.object(ShapeFactory, "shape_produces_valid_crop", return_value=True) + mocker.patch.object( + self.zero_shot_visual_prompting_ov_task.inferencer, "_get_reference_info", return_value=({}, {}) + ) dataset = generate_visual_prompting_dataset()