Replace unnamed tuple in cls result with a label object #237

Merged · 1 commit · Nov 20, 2024
3 changes: 2 additions & 1 deletion model_api/python/model_api/models/action_classification.py
@@ -10,6 +10,7 @@
 import numpy as np
 
 from model_api.adapters.utils import RESIZE_TYPES, InputTransform
+from model_api.models.result_types import Label
 
 from .model import Model
 from .result_types import ClassificationResult
@@ -223,7 +224,7 @@ def postprocess(
         logits = next(iter(outputs.values())).squeeze()
         index = np.argmax(logits)
         return ClassificationResult(
-            [(index, self.labels[index], logits[index])],
+            [Label(int(index), self.labels[index], logits[index])],
             np.ndarray(0),
             np.ndarray(0),
             np.ndarray(0),
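For callers, the switch is intended to be non-breaking: `Label` implements `__iter__`, so tuple-style unpacking of `top_labels` entries keeps working while attribute access becomes available. A minimal sketch of both access patterns (the values are made up; it assumes the `model_api` package from this repo is importable):

```python
from model_api.models.result_types import ClassificationResult, Label

# Build a result the way postprocess() now does, with made-up values.
result = ClassificationResult([Label(3, "jumping", 0.91)])

# New attribute-style access:
top = result.top_labels[0]
print(top.id, top.name, top.confidence)  # 3 jumping 0.91

# Old tuple-style unpacking still works, since Label is iterable:
index, name, confidence = result.top_labels[0]
```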
27 changes: 13 additions & 14 deletions model_api/python/model_api/models/classification.py
@@ -8,16 +8,15 @@
 import copy
 import json
 from pathlib import Path
-from typing import TYPE_CHECKING, List, Tuple
+from typing import TYPE_CHECKING
 
 import numpy as np
-from numpy import float32
 from openvino.preprocess import PrePostProcessor
 from openvino.runtime import Model, Type
 from openvino.runtime import opset10 as opset
 
 from model_api.models.image_model import ImageModel
-from model_api.models.result_types import ClassificationResult
+from model_api.models.result_types import ClassificationResult, Label
 from model_api.models.types import BooleanValue, ListValue, NumericalValue, StringValue
 from model_api.models.utils import softmax
 
@@ -267,7 +266,7 @@ def get_all_probs(self, logits: np.ndarray) -> np.ndarray:
         probs = softmax(logits.reshape(-1))
         return probs
 
-    def get_hierarchical_predictions(self, logits: np.ndarray):
+    def get_hierarchical_predictions(self, logits: np.ndarray) -> list[Label]:
         predicted_labels = []
         predicted_scores = []
         cls_heads_info = self.hierarchical_info["cls_heads_info"]
@@ -294,7 +293,7 @@ def get_hierarchical_predictions(self, logits: np.ndarray):
         predictions = list(zip(predicted_labels, predicted_scores))
         return self.labels_resolver.resolve_labels(predictions)
 
-    def get_multilabel_predictions(self, logits: np.ndarray) -> List[Tuple[int, str, float32]]:
+    def get_multilabel_predictions(self, logits: np.ndarray) -> list[Label]:
         logits = sigmoid_numpy(logits)
         scores = []
         indices = []
@@ -304,18 +303,18 @@ def get_multilabel_predictions(self, logits: np.ndarray) -> List[Tuple[int, str,
             scores.append(logits[i])
         labels = [self.labels[i] if self.labels else "" for i in indices]
 
-        return list(zip(indices, labels, scores))
+        return [Label(*data) for data in zip(indices, labels, scores)]
 
-    def get_multiclass_predictions(self, outputs: dict) -> list[tuple[int, str, float]]:
+    def get_multiclass_predictions(self, outputs: dict) -> list[Label]:
         if self.embedded_topk:
             indicesTensor = outputs[self.out_layer_names[0]][0]
             scoresTensor = outputs[self.out_layer_names[1]][0]
             labels = [self.labels[i] if self.labels else "" for i in indicesTensor]
         else:
             scoresTensor = softmax(outputs[self.out_layer_names[0]][0])
-            indicesTensor = [np.argmax(scoresTensor)]
+            indicesTensor = [int(np.argmax(scoresTensor))]
             labels = [self.labels[i] if self.labels else "" for i in indicesTensor]
-        return list(zip(indicesTensor, labels, scoresTensor))
+        return [Label(*data) for data in zip(indicesTensor, labels, scoresTensor)]
 
 
 def addOrFindSoftmaxAndTopkOutputs(inference_adapter: InferenceAdapter, topk: int, output_raw_scores: bool) -> None:
@@ -384,7 +383,7 @@ def __init__(self, hierarchical_config: dict) -> None:
         for child, parent in self.label_relations:
             self.label_tree.add_edge(parent, child)
 
-    def resolve_labels(self, predictions: list[tuple]) -> list:
+    def resolve_labels(self, predictions: list[tuple]) -> list[Label]:
         """Resolves hierarchical labels and exclusivity based on a list of ScoredLabels (labels with probability).
         The following two steps are taken:
         - select the most likely label from each label group
@@ -438,7 +437,7 @@ def get_predecessors(lbl: str, candidates: list[str]) -> list:
                 if new_lbl not in output_labels:
                     output_labels.append(new_lbl)
 
-        return [(self.label_to_idx[lbl], lbl, label_to_prob[lbl]) for lbl in sorted(output_labels)]
+        return [Label(self.label_to_idx[lbl], lbl, label_to_prob[lbl]) for lbl in sorted(output_labels)]
 
 
 class ProbabilisticLabelsResolver(GreedyLabelsResolver):
@@ -447,7 +446,7 @@ def __init__(self, hierarchical_config: dict, warmup_cache: bool = True) -> None
         if warmup_cache:
             self.label_tree.get_labels_in_topological_order()
 
-    def resolve_labels(self, predictions: list[tuple[str, float]]) -> list[tuple[int, str, float]]:
+    def resolve_labels(self, predictions: list[tuple[str, float]]) -> list[Label]:
         """Resolves hierarchical labels and exclusivity based on a list of ScoredLabels (labels with probability).
 
         The following two steps are taken:
@@ -467,7 +466,7 @@ def resolve_labels(self, predictions: list[tuple[str, float]]) -> list[tuple[int
     def __resolve_labels_probabilistic(
         self,
         label_to_probability: dict[str, float],
-    ) -> list[tuple[int, str, float]]:
+    ) -> list[Label]:
         """Resolves hierarchical labels and exclusivity based on a probabilistic label output.
 
         - selects the most likely (max) label from an exclusive group
@@ -495,7 +494,7 @@ def __resolve_labels_probabilistic(
         for lbl, probability in sorted(resolved.items()):
             if probability > 0:  # only return labels with non-zero probability
                 result.append(
-                    (
+                    Label(
                         self.label_to_idx[lbl],
                         lbl,
                         # retain the original probability in the output
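Two details of these hunks are worth noting: `int(np.argmax(...))` keeps `Label.id` a plain Python `int` rather than `np.int64`, and `Label(*data)` star-unpacks each `(index, name, score)` triple produced by `zip` onto `Label`'s positional parameters `(id, name, confidence)`. A small self-contained sketch of that pattern, with made-up scores and label names:

```python
import numpy as np

from model_api.models.result_types import Label

# Made-up logits and label names, mirroring get_multiclass_predictions.
scores = np.array([0.1, 0.7, 0.2], dtype=np.float32)
labels = ["cat", "dog", "bird"]

indices = [int(np.argmax(scores))]  # plain int, not np.int64
names = [labels[i] for i in indices]

# zip yields (index, name, score); * maps them onto Label(id, name, confidence).
top = [Label(*data) for data in zip(indices, names, scores[indices])]
print(top[0])  # 1 (dog): 0.700
```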
3 changes: 2 additions & 1 deletion model_api/python/model_api/models/result_types/__init__.py
@@ -4,7 +4,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 from .anomaly import AnomalyResult
-from .classification import ClassificationResult
+from .classification import ClassificationResult, Label
 from .detection import Detection, DetectionResult
 from .keypoint import DetectedKeypoints
 from .segmentation import (
@@ -23,6 +23,7 @@
     "Detection",
     "DetectionResult",
     "DetectedKeypoints",
+    "Label",
     "SegmentedObject",
     "SegmentedObjectWithRects",
     "ImageResultWithSoftPrediction",
28 changes: 25 additions & 3 deletions model_api/python/model_api/models/result_types/classification.py
@@ -5,20 +5,42 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Generator
 
 from .utils import array_shape_to_str
 
 if TYPE_CHECKING:
     import numpy as np
 
 
+class Label:
+    """Entity representing a predicted label."""
+
+    def __init__(
+        self,
+        id: int | None = None,
+        name: str | None = None,
+        confidence: float | None = None,
+    ) -> None:
+        self.name = name
+        self.confidence = confidence
+        self.id = id
+
+    def __iter__(self) -> Generator:
+        output = (self.id, self.name, self.confidence)
+        for i in output:
+            yield i
+
+    def __str__(self) -> str:
+        return f"{self.id} ({self.name}): {self.confidence:.3f}"
+
+
 class ClassificationResult:
     """Results for classification models."""
 
     def __init__(
         self,
-        top_labels: list[tuple[int, str, float]] | None = None,
+        top_labels: list[Label] | None = None,
         saliency_map: np.ndarray | None = None,
         feature_vector: np.ndarray | None = None,
         raw_scores: np.ndarray | None = None,
@@ -30,7 +52,7 @@ def __init__(
 
     def __str__(self) -> str:
         assert self.top_labels is not None
-        labels = ", ".join(f"{idx} ({label}): {confidence:.3f}" for idx, label, confidence in self.top_labels)
+        labels = ", ".join(str(label) for label in self.top_labels)
         return (
             f"{labels}, {array_shape_to_str(self.saliency_map)}, {array_shape_to_str(self.feature_vector)}, "
             f"{array_shape_to_str(self.raw_scores)}"
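`Label` deliberately stays tuple-compatible: `__iter__` yields `(id, name, confidence)` in the old tuple order, and `__str__` centralizes the formatting that `ClassificationResult.__str__` used to inline. A quick sketch of its behavior, with made-up values:

```python
from model_api.models.result_types import Label

label = Label(id=7, name="person", confidence=0.873)

print(label)             # 7 (person): 0.873
print(tuple(label))      # (7, 'person', 0.873) -- old tuple order via __iter__
idx, name, conf = label  # unpacking works too
```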
20 changes: 20 additions & 0 deletions tests/python/unit/results/test_cls_result.py
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2020-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import numpy as np
+from model_api.models.result_types import ClassificationResult, Label
+
+
+def test_cls_result():
+    label = Label(1, "label", 0.5)
+    tst_vector = np.array([1, 2, 3])
+    cls_result = ClassificationResult([label], tst_vector, tst_vector, tst_vector)
+
+    assert cls_result.top_labels[0].id == 1
+    assert cls_result.top_labels[0].name == "label"
+    assert cls_result.top_labels[0].confidence == 0.5
+    assert str(cls_result) == "1 (label): 0.500, [3], [3], [3]"
+    assert cls_result.top_labels[0].__str__() == "1 (label): 0.500"
+    assert tuple(cls_result.top_labels[0].__iter__()) == (1, "label", 0.5)
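Assuming the repository's usual pytest setup, the new test runs with `pytest tests/python/unit/results/test_cls_result.py`.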