Fix OTE SDK test #1320

Merged
merged 1 commit on Oct 31, 2022

@@ -146,7 +146,10 @@ def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, Any]
         detections = self.convert2array(detections)
         if not isinstance(detections, np.ndarray) and isinstance(self, OpenVINODetectionInferencer):
             detections = np.array(detections)
-        return self.converter.convert_to_annotation(detections, metadata)
+        if isinstance(self.converter, MaskToAnnotationConverter):
+            return self.converter.convert_to_annotation(detections, metadata)
+        detections[:, 2:] /= np.tile(metadata["original_shape"][1::-1], 2)
+        return self.converter.convert_to_annotation(detections)
 
     @check_input_parameters_type()
     def predict(self, image: np.ndarray) -> Tuple[AnnotationSceneEntity, np.ndarray, np.ndarray]:
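
The added lines scale pixel-space boxes down to relative coordinates before handing them to the converter. A minimal sketch of that normalization, assuming each detection row is [label, score, x1, y1, x2, y2] in pixels and metadata["original_shape"] is the usual (height, width, channels) image shape; the values below are made up for illustration:

import numpy as np

detections = np.array([[0.0, 0.9, 64.0, 32.0, 128.0, 96.0]])  # label, score, x1, y1, x2, y2 in pixels
original_shape = (480, 640, 3)                                 # (H, W, C)

scale = np.tile(original_shape[1::-1], 2)  # [1::-1] picks (W, H); tiling gives (W, H, W, H)
detections[:, 2:] /= scale                 # boxes become fractions of the image size, e.g. 64/640 -> 0.1

This matches the converter change further down, where __convert_to_annotations no longer divides by the image size itself.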
@@ -255,9 +258,9 @@ def predict_tile(self,
         labels = labels[keep]
         scores = scores[keep]
 
-        metadata["original_shape"] = original_shape
         detections = np.concatenate((labels[:, np.newaxis], scores[:, np.newaxis], boxes), axis=-1)
-        detections = self.converter.convert_to_annotation(detections, metadata)
+        detections[:, 2:] /= np.tile(original_shape[1::-1], 2)
+        detections = self.converter.convert_to_annotation(detections)
         return detections

ote_sdk/ote_sdk/entities/model.py (4 changes: 1 addition & 3 deletions)
@@ -411,14 +411,12 @@ def exportable_code_adapter(self) -> Optional[ExportableCodeAdapter]:
         """
         return self.__exportable_code_adapter
 
-    def get_data(self, key: str) -> Optional[bytes]:
+    def get_data(self, key: str) -> bytes:
         """
         Fetches byte data for a certain model.
         :param key: key to fetch data for
         :return:
         """
-        if key not in self.__model_adapters:
-            return None
         return self.__model_adapters[key].data
 
     def set_data(self, key: str, data: Union[bytes, IDataSource], skip_deletion=False):
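
With this change get_data no longer hides missing keys behind None: indexing self.__model_adapters directly means an unknown key raises KeyError. A hypothetical caller that previously relied on the None return would now guard the call; "openvino.bin" below is just an illustrative key:

try:
    weights = model.get_data("openvino.bin")
except KeyError:
    weights = None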
@@ -7,7 +7,7 @@
 #
 
 import abc
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import cv2
 import numpy as np
@@ -54,11 +54,13 @@ class DetectionToAnnotationConverter(IPredictionToAnnotationConverter):
     Converts Object Detections to Annotations
     """
 
-    def __init__(self, labels: LabelSchemaEntity):
-        self.label_map = dict(enumerate(labels.get_labels(include_empty=False)))
+    def __init__(self, labels: Union[LabelSchemaEntity, List]):
+        if isinstance(labels, LabelSchemaEntity):
+            labels = labels.get_labels(include_empty=False)
+        self.label_map = dict(enumerate(labels))
 
     def convert_to_annotation(
-        self, predictions: np.ndarray, metadata: Dict[str, Any]
+        self, predictions: np.ndarray, metadata: Optional[Dict] = None
     ) -> AnnotationSceneEntity:
         """
         Converts a set of predictions into an AnnotationScene object
@@ -78,7 +80,7 @@ def convert_to_annotation(
         :returns AnnotationScene: AnnotationScene Object containing the boxes
             obtained from the prediction
         """
-        annotations = self.__convert_to_annotations(predictions, metadata)
+        annotations = self.__convert_to_annotations(predictions)
         # media_identifier = ImageIdentifier(image_id=ID())
         annotation_scene = AnnotationSceneEntity(
             id=ID(),
@@ -90,9 +92,7 @@ def convert_to_annotation(
 
         return annotation_scene
 
-    def __convert_to_annotations(
-        self, predictions: np.ndarray, metadata: Dict[str, Any]
-    ) -> List[Annotation]:
+    def __convert_to_annotations(self, predictions: np.ndarray) -> List[Annotation]:
         """
         Converts a list of Detections to OTE SDK Annotation objects
 
@@ -115,7 +115,6 @@ def __convert_to_annotations(
                 f"got {predictions.shape}"
             )
 
-        image_size = metadata["original_shape"][1::-1]
         for prediction in predictions:
             if prediction.shape == (7,):
                 # Some OpenVINO models use an output shape of [7,]
@@ -125,7 +124,7 @@ def __convert_to_annotations(
             label = int(prediction[0])
             confidence = prediction[1]
             scored_label = ScoredLabel(self.label_map[label], confidence)
-            coords = prediction[2:] / np.tile(image_size, 2)
+            coords = prediction[2:]
             annotations.append(
                 Annotation(
                     Rectangle(coords[0], coords[1], coords[2], coords[3]),
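
Taken together, the converter changes mean it can be built from a plain list of label entities and fed predictions whose coordinates are already normalized, with metadata now optional. A hypothetical usage sketch, assuming the converter lives in its usual ote_sdk.usecases.exportable_code.prediction_to_annotation_converter module and that plain label names and values below are illustrative:

import numpy as np

from ote_sdk.entities.label import Domain, LabelEntity
from ote_sdk.usecases.exportable_code.prediction_to_annotation_converter import (
    DetectionToAnnotationConverter,
)

labels = [
    LabelEntity(name="person", domain=Domain.DETECTION),
    LabelEntity(name="car", domain=Domain.DETECTION),
]
converter = DetectionToAnnotationConverter(labels)  # a LabelSchemaEntity still works too

# One detection row: label index, confidence, x1, y1, x2, y2, already in [0, 1].
predictions = np.array([[0, 0.8, 0.10, 0.20, 0.40, 0.50]])
scene = converter.convert_to_annotation(predictions)  # metadata no longer required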