From 07fa9a1edbab4bce7e871addc624e11995a41a5c Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 7 Aug 2023 22:53:39 +0100 Subject: [PATCH 01/11] Removing old code --- aws_lambda_powertools/metrics/metrics.py | 117 ----------------------- 1 file changed, 117 deletions(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index d65cb62720a..c5afbe9ad57 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -208,120 +208,3 @@ def service(self, service): # we can quickly revert and duplicate code while using self.provider EphemeralMetrics = AmazonCloudWatchEMFProvider - -# noqa: ERA001 -# class EphemeralMetrics(MetricManager): -# """Non-singleton version of Metrics to not persist metrics across instances -# -# NOTE: This is useful when you want to: -# -# - Create metrics for distinct namespaces -# - Create the same metrics with different dimensions more than once -# """ -# -# # _dimensions: Dict[str, str] = {} -# _default_dimensions: Dict[str, Any] = {} -# -# def __init__( -# self, -# service: str | None = None, -# namespace: str | None = None, -# provider: AmazonCloudWatchEMFProvider | None = None, -# ): -# super().__init__(namespace=namespace, service=service) -# -# self.default_dimensions = self._default_dimensions -# # # self.dimension_set = self._dimensions -# # self.dimension_set.update(**self._default_dimensions) -# -# self.provider = provider or AmazonCloudWatchEMFProvider( -# namespace=namespace, -# service=service, -# metric_set=self.metric_set, -# metadata_set=self.metadata_set, -# dimension_set=self.dimension_set, -# default_dimensions=self._default_dimensions, -# ) -# -# def add_metric( -# self, -# name: str, -# unit: MetricUnit | str, -# value: float, -# resolution: MetricResolution | int = 60, -# ) -> None: -# return self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) -# -# def add_dimension(self, name: str, value: str) -> None: -# return self.provider.add_dimension(name=name, value=value) -# -# def serialize_metric_set( -# self, -# metrics: Dict | None = None, -# dimensions: Dict | None = None, -# metadata: Dict | None = None, -# ) -> Dict: -# return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) -# -# def add_metadata(self, key: str, value: Any) -> None: -# self.provider.add_metadata(key=key, value=value) -# -# def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: -# self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) -# -# def log_metrics( -# self, -# lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, -# capture_cold_start_metric: bool = False, -# raise_on_empty_metrics: bool = False, -# default_dimensions: Dict[str, str] | None = None, -# ): -# return self.provider.log_metrics( -# lambda_handler=lambda_handler, -# capture_cold_start_metric=capture_cold_start_metric, -# raise_on_empty_metrics=raise_on_empty_metrics, -# default_dimensions=default_dimensions, -# ) -# -# def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: -# return self.provider._extract_metric_resolution_value(resolution=resolution) -# -# def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: -# return self.provider._extract_metric_unit_value(unit=unit) -# -# def _add_cold_start_metric(self, context: Any) -> None: -# return self.provider._add_cold_start_metric(context=context) 
-# -# def set_default_dimensions(self, **dimensions) -> None: -# """Persist dimensions across Lambda invocations -# -# Parameters -# ---------- -# dimensions : Dict[str, Any], optional -# metric dimensions as key=value -# -# Example -# ------- -# **Sets some default dimensions that will always be present across metrics and invocations** -# -# from aws_lambda_powertools import Metrics -# -# metrics = Metrics(namespace="ServerlessAirline", service="payment") -# metrics.set_default_dimensions(environment="demo", another="one") -# -# @metrics.log_metrics() -# def lambda_handler(): -# return True -# """ -# return self.provider.set_default_dimensions(**dimensions) -# -# def clear_default_dimensions(self) -> None: -# self.default_dimensions.clear() -# -# def clear_metrics(self) -> None: -# self.provider.clear_metrics() -# # re-add default dimensions -# self.set_default_dimensions(**self.default_dimensions) -# - -# __all__ = [] From a7e85acfcb25e17165717cc2db98879a40db32af Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 7 Aug 2023 23:28:56 +0100 Subject: [PATCH 02/11] Making function standalone --- aws_lambda_powertools/metrics/metrics.py | 8 +- .../provider/cloudwatch_emf/cloudwatch.py | 82 ++++--------------- aws_lambda_powertools/metrics/shared.py | 72 ++++++++++++++++ 3 files changed, 87 insertions(+), 75 deletions(-) create mode 100644 aws_lambda_powertools/metrics/shared.py diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index c5afbe9ad57..6ad2c577407 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -1,7 +1,7 @@ # NOTE: keeps for compatibility from __future__ import annotations -from typing import Any, Callable, Dict, Optional, Union +from typing import Any, Callable, Dict, Optional from aws_lambda_powertools.metrics.base import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider @@ -138,12 +138,6 @@ def log_metrics( default_dimensions=default_dimensions, ) - def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: - return self.provider._extract_metric_resolution_value(resolution=resolution) - - def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: - return self.provider._extract_metric_unit_value(unit=unit) - def _add_cold_start_metric(self, context: Any) -> None: self.provider._add_cold_start_metric(context=context) diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index 921fcee6045..36ea3d5c18c 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -8,18 +8,18 @@ import os import warnings from collections import defaultdict -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional from aws_lambda_powertools.metrics.base import single_metric from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError from aws_lambda_powertools.metrics.provider import MetricsProviderBase from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import ( - 
MetricResolutionError, - MetricUnitError, -) from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit +from aws_lambda_powertools.metrics.shared import ( + extract_cloudwatch_metric_resolution_value, + extract_cloudwatch_metric_unit_value, +) from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice @@ -123,8 +123,15 @@ def add_metric( if not isinstance(value, numbers.Number): raise MetricValueError(f"{value} is not a valid number") - unit = self._extract_metric_unit_value(unit=unit) - resolution = self._extract_metric_resolution_value(resolution=resolution) + unit = extract_cloudwatch_metric_unit_value( + metric_units=self._metric_units, + metric_valid_options=self._metric_unit_valid_options, + unit=unit, + ) + resolution = extract_cloudwatch_metric_resolution_value( + metric_resolutions=self._metric_resolutions, + resolution=resolution, + ) metric: Dict = self.metric_set.get(name, defaultdict(list)) metric["Unit"] = unit metric["StorageResolution"] = resolution @@ -392,67 +399,6 @@ def decorate(event, context): return decorate - def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: - """Return metric value from metric unit whether that's str or MetricResolution enum - - Parameters - ---------- - unit : Union[int, MetricResolution] - Metric resolution - - Returns - ------- - int - Metric resolution value must be 1 or 60 - - Raises - ------ - MetricResolutionError - When metric resolution is not supported by CloudWatch - """ - if isinstance(resolution, MetricResolution): - return resolution.value - - if isinstance(resolution, int) and resolution in self._metric_resolutions: - return resolution - - raise MetricResolutionError( - f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolutions}", # noqa: E501 - ) - - def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: - """Return metric value from metric unit whether that's str or MetricUnit enum - - Parameters - ---------- - unit : Union[str, MetricUnit] - Metric unit - - Returns - ------- - str - Metric unit value (e.g. 
"Seconds", "Count/Second") - - Raises - ------ - MetricUnitError - When metric unit is not supported by CloudWatch - """ - - if isinstance(unit, str): - if unit in self._metric_unit_valid_options: - unit = MetricUnit[unit].value - - if unit not in self._metric_units: - raise MetricUnitError( - f"Invalid metric unit '{unit}', expected either option: {self._metric_unit_valid_options}", - ) - - if isinstance(unit, MetricUnit): - unit = unit.value - - return unit - def _add_cold_start_metric(self, context: Any) -> None: """Add cold start metric and function_name dimension diff --git a/aws_lambda_powertools/metrics/shared.py b/aws_lambda_powertools/metrics/shared.py new file mode 100644 index 00000000000..d951c0749a3 --- /dev/null +++ b/aws_lambda_powertools/metrics/shared.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from typing import List + +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import ( + MetricResolutionError, + MetricUnitError, +) +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit + + +def extract_cloudwatch_metric_resolution_value(metric_resolutions: List, resolution: int | MetricResolution) -> int: + """Return metric value from CloudWatch metric unit whether that's str or MetricResolution enum + + Parameters + ---------- + unit : Union[int, MetricResolution] + Metric resolution + + Returns + ------- + int + Metric resolution value must be 1 or 60 + + Raises + ------ + MetricResolutionError + When metric resolution is not supported by CloudWatch + """ + if isinstance(resolution, MetricResolution): + return resolution.value + + if isinstance(resolution, int) and resolution in metric_resolutions: + return resolution + + raise MetricResolutionError( + f"Invalid metric resolution '{resolution}', expected either option: {metric_resolutions}", # noqa: E501 + ) + + +def extract_cloudwatch_metric_unit_value(metric_units: List, metric_valid_options: List, unit: str | MetricUnit) -> str: + """Return metric value from CloudWatch metric unit whether that's str or MetricUnit enum + + Parameters + ---------- + unit : Union[str, MetricUnit] + Metric unit + + Returns + ------- + str + Metric unit value (e.g. "Seconds", "Count/Second") + + Raises + ------ + MetricUnitError + When metric unit is not supported by CloudWatch + """ + + if isinstance(unit, str): + if unit in metric_valid_options: + unit = MetricUnit[unit].value + + if unit not in metric_units: + raise MetricUnitError( + f"Invalid metric unit '{unit}', expected either option: {metric_valid_options}", + ) + + if isinstance(unit, MetricUnit): + unit = unit.value + + return unit From d93fac6ce1da87064ddc91c461e1b54d76ccf311 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 7 Aug 2023 23:31:07 +0100 Subject: [PATCH 03/11] Fixing docstring --- aws_lambda_powertools/metrics/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 6ad2c577407..bcf21051635 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -8,7 +8,7 @@ class Metrics: - """Metrics create an EMF object with up to 100 metrics + """Metrics create an CloudWatch EMF object with up to 100 metrics Use Metrics when you need to create multiple metrics that have dimensions in common (e.g. service_name="payment"). 
From b4ac2f2da41e611198a9dea3156f343e83a76931 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 7 Aug 2023 23:57:04 +0100 Subject: [PATCH 04/11] Removing subclassing + adding unit tests --- .../metrics/provider/base.py | 123 ++---------------- .../provider/cloudwatch_emf/cloudwatch.py | 3 +- tests/unit/metrics/test_metrics.py | 8 ++ 3 files changed, 20 insertions(+), 114 deletions(-) create mode 100644 tests/unit/metrics/test_metrics.py diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 7617193033e..13232a911ee 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -4,16 +4,17 @@ import logging from typing import Any, Callable, Dict, Optional -from typing_extensions import Protocol +from typing_extensions import Protocol, runtime_checkable logger = logging.getLogger(__name__) is_cold_start = True +@runtime_checkable class MetricsProviderBase(Protocol): """ - Class for metric provider interface. + Interface for MetricsProvider. This class serves as an interface for creating your own metric provider. Inherit from this class and implement the required methods to define your specific metric provider. @@ -25,80 +26,19 @@ class MetricsProviderBase(Protocol): """ def add_metric(self, *args: Any, **kwargs: Any) -> Any: - """ - Abstract method for adding a metric. - - This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. - - Parameters - ---------- - *args: - Positional arguments. - *kwargs: - Keyword arguments. - - Returns - ---------- - Dict - A combined metrics dictionary. - - Raises - ---------- - NotImplementedError - This method must be implemented in subclasses. - """ - raise NotImplementedError + ... def serialize_metric_set(self, *args: Any, **kwargs: Any) -> Any: - """ - Abstract method for serialize a metric. - - This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + ... - Parameters - ---------- - *args: - Positional arguments. - *kwargs: - Keyword arguments. - - Returns - ---------- - Dict - Serialized metrics - - Raises - ---------- - NotImplementedError - This method must be implemented in subclasses. - """ - raise NotImplementedError - - # flush serialized data to output, or send to API directly def flush_metrics(self, *args: Any, **kwargs) -> Any: - """ - Abstract method for flushing a metric. - - This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. - - Parameters - ---------- - *args: - Positional arguments. - *kwargs: - Keyword arguments. - - Raises - ---------- - NotImplementedError - This method must be implemented in subclasses. - """ - raise NotImplementedError + ... +@runtime_checkable class MetricsBase(Protocol): """ - Class for metric template. + Interface for metric template. This class serves as a template for creating your own metric class. Inherit from this class and implement the necessary methods to define your specific metric. @@ -107,54 +47,13 @@ class MetricsBase(Protocol): """ def add_metric(self, *args, **kwargs): - """ - Abstract method for adding a metric. - - This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. - - Parameters - ---------- - *args: - Positional arguments. - *kwargs: - Keyword arguments. - - Returns - ---------- - Dict - A combined metrics dictionary. 
- - Raises - ---------- - NotImplementedError - This method must be implemented in subclasses. - """ - raise NotImplementedError + ... def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - """Manually flushes the metrics. This is normally not necessary, - unless you're running on other runtimes besides Lambda, where the @log_metrics - decorator already handles things for you. - - Parameters - ---------- - raise_on_empty_metrics : bool, optional - raise exception if no metrics are emitted, by default False - """ - raise NotImplementedError + ... def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: - """ - Add a cold start metric for a specific function. - - Parameters - ---------- - metric_name: str - The name of the cold start metric to add. - function_name: str - The name of the function associated with the cold start metric. - """ - raise NotImplementedError + ... def log_metrics( self, diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index 36ea3d5c18c..db1fb6dfec9 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -12,7 +12,6 @@ from aws_lambda_powertools.metrics.base import single_metric from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.provider import MetricsProviderBase from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit @@ -27,7 +26,7 @@ logger = logging.getLogger(__name__) -class AmazonCloudWatchEMFProvider(MetricsProviderBase): +class AmazonCloudWatchEMFProvider: """Base class for metric functionality (namespace, metric, dimension, serialization) MetricManager creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF). 
diff --git a/tests/unit/metrics/test_metrics.py b/tests/unit/metrics/test_metrics.py new file mode 100644 index 00000000000..fa31dd7898c --- /dev/null +++ b/tests/unit/metrics/test_metrics.py @@ -0,0 +1,8 @@ +from aws_lambda_powertools.metrics.provider import ( + MetricsProviderBase, +) +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider + + +def test_amazoncloudwatchemf_is_subclass_of_metricsproviderbase(): + assert issubclass(AmazonCloudWatchEMFProvider, MetricsProviderBase) From 0361983bd0a7e465af6a1d1363dd1f8f9c26b715 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Tue, 8 Aug 2023 17:19:24 +0100 Subject: [PATCH 05/11] Refactoring metrics --- aws_lambda_powertools/metrics/base.py | 8 +- .../metrics/provider/__init__.py | 5 +- .../metrics/provider/base.py | 147 ++++++++++++++---- .../provider/cloudwatch_emf/cloudwatch.py | 52 ++----- .../{cloudwatch_emf => }/cold_start.py | 0 .../test_metrics_cloudwatch_emf.py} | 32 ++-- tests/unit/metrics/test_metrics.py | 4 +- 7 files changed, 159 insertions(+), 89 deletions(-) rename aws_lambda_powertools/metrics/provider/{cloudwatch_emf => }/cold_start.py (100%) rename tests/functional/{test_metrics.py => metrics/test_metrics_cloudwatch_emf.py} (98%) diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index b32421431cd..2c45aa1fb3e 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -17,12 +17,12 @@ MetricValueError, SchemaValidationError, ) -from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cold_start import ( - reset_cold_start_flag, # noqa: F401 # backwards compatibility -) +from aws_lambda_powertools.metrics.provider import cold_start from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit +from aws_lambda_powertools.metrics.provider.cold_start import ( + reset_cold_start_flag, # noqa: F401 # backwards compatibility +) from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index 814812c135b..30019199c52 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -1,6 +1,5 @@ -from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase +from aws_lambda_powertools.metrics.provider.base import BaseProvider __all__ = [ - "MetricsBase", - "MetricsProviderBase", + "BaseProvider", ] diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 13232a911ee..818409bfdde 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -2,19 +2,17 @@ import functools import logging +from abc import ABC, abstractmethod from typing import Any, Callable, Dict, Optional -from typing_extensions import Protocol, runtime_checkable +from aws_lambda_powertools.metrics.provider import cold_start logger = logging.getLogger(__name__) -is_cold_start = True - -@runtime_checkable -class MetricsProviderBase(Protocol): +class 
BaseProvider(ABC): """ Class for metric provider interface. This class serves as an interface for creating your own metric provider. Inherit from this class and implement the required methods to define your specific metric provider. @@ -25,41 +23,127 @@ class MetricsProviderBase(Protocol): 3. Customize the behavior and functionality of the metric provider in your subclass. """ + @abstractmethod def add_metric(self, *args: Any, **kwargs: Any) -> Any: - ... + """ + Abstract method for adding a metric. + + This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Returns + ---------- + Dict + A combined metrics dictionary. + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError + @abstractmethod def serialize_metric_set(self, *args: Any, **kwargs: Any) -> Any: - ... + """ + Abstract method for serializing a metric set. + + This method must be implemented in subclasses to serialize the stored metrics and return them. + + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Returns + ---------- + Dict + Serialized metrics + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError + @abstractmethod def flush_metrics(self, *args: Any, **kwargs) -> Any: - ... + """ + Abstract method for flushing metrics. + This method must be implemented in subclasses to flush (publish) the stored metrics. -@runtime_checkable -class MetricsBase(Protocol): - """ - Interface for metric template. + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError - This class serves as a template for creating your own metric class. Inherit from this class - and implement the necessary methods to define your specific metric. + @abstractmethod + def clear_metrics(self, *args: Any, **kwargs) -> Any: + """ + Abstract method for clearing the metric instance. - NOTE: need to improve this docstring - """ + This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. - def add_metric(self, *args, **kwargs): - ... + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - ... + + @abstractmethod + def add_cold_start_metric(self, context: Any) -> Any: + """ + Abstract method for adding a cold start metric. + + This method must be implemented in subclasses to add a cold start metric. - def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: - ... + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses.
+ """ + raise NotImplementedError def log_metrics( self, lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, + **kwargs, ): """Decorator to serialize and publish metrics at the end of a function execution. @@ -96,6 +180,8 @@ def handler(event, context): Propagate error received """ + default_dimensions = kwargs.get("default_dimensions") + # If handler is None we've been called with parameters # Return a partial function with args filled if lambda_handler is None: @@ -104,11 +190,14 @@ def handler(event, context): self.log_metrics, capture_cold_start_metric=capture_cold_start_metric, raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, ) @functools.wraps(lambda_handler) def decorate(event, context): try: + if default_dimensions: + self.set_default_dimensions(**default_dimensions) response = lambda_handler(event, context) if capture_cold_start_metric: self._add_cold_start_metric(context=context) @@ -127,17 +216,15 @@ def _add_cold_start_metric(self, context: Any) -> None: context : Any Lambda context """ - global is_cold_start - if not is_cold_start: + if not cold_start.is_cold_start: return logger.debug("Adding cold start metric and function_name dimension") - self.add_cold_start_metric(metric_name="ColdStart", function_name=context.function_name) + self.add_cold_start_metric(context=context) - is_cold_start = False + cold_start.is_cold_start = False def reset_cold_start_flag_provider(): - global is_cold_start - if not is_cold_start: - is_cold_start = True + if not cold_start.is_cold_start: + cold_start.is_cold_start = True diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index db1fb6dfec9..ae612aa7aee 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -1,7 +1,6 @@ from __future__ import annotations import datetime -import functools import json import logging import numbers @@ -12,7 +11,7 @@ from aws_lambda_powertools.metrics.base import single_metric from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start +from aws_lambda_powertools.metrics.provider.base import BaseProvider from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.shared import ( @@ -26,7 +25,7 @@ logger = logging.getLogger(__name__) -class AmazonCloudWatchEMFProvider: +class AmazonCloudWatchEMFProvider(BaseProvider): """Base class for metric functionality (namespace, metric, dimension, serialization) MetricManager creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF). @@ -335,7 +334,7 @@ def log_metrics( lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, - default_dimensions: Dict[str, str] | None = None, + **kwargs, ): """Decorator to serialize and publish metrics at the end of a function execution. 
@@ -372,33 +371,14 @@ def handler(event, context): Propagate error received """ - # If handler is None we've been called with parameters - # Return a partial function with args filled - if lambda_handler is None: - logger.debug("Decorator called with parameters") - return functools.partial( - self.log_metrics, - capture_cold_start_metric=capture_cold_start_metric, - raise_on_empty_metrics=raise_on_empty_metrics, - default_dimensions=default_dimensions, - ) - - @functools.wraps(lambda_handler) - def decorate(event, context): - try: - if default_dimensions: - self.set_default_dimensions(**default_dimensions) - response = lambda_handler(event, context) - if capture_cold_start_metric: - self._add_cold_start_metric(context=context) - finally: - self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) - - return response - - return decorate + return super().log_metrics( + lambda_handler=lambda_handler, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + **kwargs, + ) - def _add_cold_start_metric(self, context: Any) -> None: + def add_cold_start_metric(self, context: Any) -> None: """Add cold start metric and function_name dimension Parameters @@ -406,13 +386,11 @@ def _add_cold_start_metric(self, context: Any) -> None: context : Any Lambda context """ - if cold_start.is_cold_start: - logger.debug("Adding cold start metric and function_name dimension") - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace=self.namespace) as metric: - metric.add_dimension(name="function_name", value=context.function_name) - if self.service: - metric.add_dimension(name="service", value=str(self.service)) - cold_start.is_cold_start = False + logger.debug("Adding cold start metric and function_name dimension") + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace=self.namespace) as metric: + metric.add_dimension(name="function_name", value=context.function_name) + if self.service: + metric.add_dimension(name="service", value=str(self.service)) def set_default_dimensions(self, **dimensions) -> None: """Persist dimensions across Lambda invocations diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cold_start.py b/aws_lambda_powertools/metrics/provider/cold_start.py similarity index 100% rename from aws_lambda_powertools/metrics/provider/cloudwatch_emf/cold_start.py rename to aws_lambda_powertools/metrics/provider/cold_start.py diff --git a/tests/functional/test_metrics.py b/tests/functional/metrics/test_metrics_cloudwatch_emf.py similarity index 98% rename from tests/functional/test_metrics.py rename to tests/functional/metrics/test_metrics_cloudwatch_emf.py index 329ff7064dd..07fd4793c8c 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/metrics/test_metrics_cloudwatch_emf.py @@ -16,14 +16,11 @@ SchemaValidationError, single_metric, ) -from aws_lambda_powertools.metrics.provider import ( - MetricsBase, - MetricsProviderBase, -) +from aws_lambda_powertools.metrics.provider import BaseProvider from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cold_start import reset_cold_start_flag from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS +from aws_lambda_powertools.metrics.provider.cold_start import reset_cold_start_flag 
@pytest.fixture(scope="function", autouse=True) @@ -1273,7 +1270,7 @@ def lambda_handler(evt, ctx): @pytest.fixture -def metrics_provider() -> MetricsProviderBase: +def metrics_provider() -> BaseProvider: class MetricsProvider: def __init__(self): self.metric_store: List = [] @@ -1283,7 +1280,7 @@ def __init__(self): def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): self.metric_store.append({"name": name, "value": value, "tag": tag}) - def serialize(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): if raise_on_empty_metrics and len(self.metric_store) == 0: raise SchemaValidationError("Must contain at least one metric.") @@ -1300,8 +1297,8 @@ def clear(self): @pytest.fixture -def metrics_class() -> MetricsBase: - class MetricsClass(MetricsBase): +def metrics_class() -> BaseProvider: + class MetricsClass(BaseProvider): def __init__(self, provider): self.provider = provider super().__init__() @@ -1310,12 +1307,21 @@ def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs) self.provider.add_metric(name=name, value=value, tag=tag) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - self.provider.serialize(raise_on_empty_metrics=raise_on_empty_metrics) + self.provider.serialize_metric_set(raise_on_empty_metrics=raise_on_empty_metrics) self.provider.flush() self.provider.clear() - def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: - self.provider.add_metric(name=metric_name, value=1, function_name=function_name) + def add_cold_start_metric(self, context: Any) -> None: + self.provider.add_metric(name="ColdStart", value=1, function_name=context.function_name) + + def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + if raise_on_empty_metrics and len(self.metric_store) == 0: + raise SchemaValidationError("Must contain at least one metric.") + + self.result = self.provider.flush() + + def clear_metrics(self) -> None: + self.provider.clear_metrics() return MetricsClass @@ -1323,7 +1329,7 @@ def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: def test_metrics_provider_basic(capsys, metrics_provider, metric): provider = metrics_provider() provider.add_metric(**metric) - provider.serialize() + provider.serialize_metric_set() provider.flush() output = capture_metrics_output(capsys) assert output[0]["name"] == metric["name"] diff --git a/tests/unit/metrics/test_metrics.py b/tests/unit/metrics/test_metrics.py index fa31dd7898c..7fa635c6c27 100644 --- a/tests/unit/metrics/test_metrics.py +++ b/tests/unit/metrics/test_metrics.py @@ -1,8 +1,8 @@ from aws_lambda_powertools.metrics.provider import ( - MetricsProviderBase, + BaseProvider, ) from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider def test_amazoncloudwatchemf_is_subclass_of_metricsproviderbase(): - assert issubclass(AmazonCloudWatchEMFProvider, MetricsProviderBase) + assert issubclass(AmazonCloudWatchEMFProvider, BaseProvider) From e8fdaa1a0902a2dbaf87507c1ee9c2e53efe936a Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Tue, 8 Aug 2023 17:35:21 +0100 Subject: [PATCH 06/11] Refactoring tests --- tests/functional/metrics/conftest.py | 91 +++++++ .../metrics/test_metrics_cloudwatch_emf.py | 251 +----------------- .../metrics/test_metrics_provider.py | 182 +++++++++++++ tests/unit/metrics/test_metrics.py | 8 - 4 files changed, 274 
insertions(+), 258 deletions(-) create mode 100644 tests/functional/metrics/conftest.py create mode 100644 tests/functional/metrics/test_metrics_provider.py delete mode 100644 tests/unit/metrics/test_metrics.py diff --git a/tests/functional/metrics/conftest.py b/tests/functional/metrics/conftest.py new file mode 100644 index 00000000000..cb0e083ca1f --- /dev/null +++ b/tests/functional/metrics/conftest.py @@ -0,0 +1,91 @@ +from typing import Any, Dict, List, Union + +import pytest + +from aws_lambda_powertools.metrics import ( + MetricResolution, + Metrics, + MetricUnit, +) +from aws_lambda_powertools.metrics.provider.cold_start import reset_cold_start_flag + + +@pytest.fixture(scope="function", autouse=True) +def reset_metric_set(): + metrics = Metrics() + metrics.clear_metrics() + metrics.clear_default_dimensions() + reset_cold_start_flag() # ensure each test has cold start + yield + + +@pytest.fixture +def metric_with_resolution() -> Dict[str, Union[str, int]]: + return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.High} + + +@pytest.fixture +def metric() -> Dict[str, str]: + return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1} + + +@pytest.fixture +def metrics() -> List[Dict[str, str]]: + return [ + {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, + {"name": "metric_two", "unit": MetricUnit.Count, "value": 1}, + ] + + +@pytest.fixture +def metrics_same_name() -> List[Dict[str, str]]: + return [ + {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 5}, + ] + + +@pytest.fixture +def dimension() -> Dict[str, str]: + return {"name": "test_dimension", "value": "test"} + + +@pytest.fixture +def dimensions() -> List[Dict[str, str]]: + return [ + {"name": "test_dimension", "value": "test"}, + {"name": "test_dimension_2", "value": "test"}, + ] + + +@pytest.fixture +def non_str_dimensions() -> List[Dict[str, Any]]: + return [ + {"name": "test_dimension", "value": True}, + {"name": "test_dimension_2", "value": 3}, + ] + + +@pytest.fixture +def namespace() -> str: + return "test_namespace" + + +@pytest.fixture +def service() -> str: + return "test_service" + + +@pytest.fixture +def metadata() -> Dict[str, str]: + return {"key": "username", "value": "test"} + + +@pytest.fixture +def a_hundred_metrics() -> List[Dict[str, str]]: + return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(100)] + + +@pytest.fixture +def a_hundred_metric_values() -> List[Dict[str, str]]: + return [{"name": "metric", "unit": "Count", "value": i} for i in range(100)] diff --git a/tests/functional/metrics/test_metrics_cloudwatch_emf.py b/tests/functional/metrics/test_metrics_cloudwatch_emf.py index 07fd4793c8c..101c3d86883 100644 --- a/tests/functional/metrics/test_metrics_cloudwatch_emf.py +++ b/tests/functional/metrics/test_metrics_cloudwatch_emf.py @@ -1,7 +1,7 @@ import json import warnings from collections import namedtuple -from typing import Any, Dict, List, Union +from typing import Dict, List import pytest @@ -16,92 +16,8 @@ SchemaValidationError, single_metric, ) -from aws_lambda_powertools.metrics.provider import BaseProvider -from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS -from 
aws_lambda_powertools.metrics.provider.cold_start import reset_cold_start_flag - - -@pytest.fixture(scope="function", autouse=True) -def reset_metric_set(): - metrics = Metrics() - metrics.clear_metrics() - metrics.clear_default_dimensions() - reset_cold_start_flag() # ensure each test has cold start - yield - - -@pytest.fixture -def metric_with_resolution() -> Dict[str, Union[str, int]]: - return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.High} - - -@pytest.fixture -def metric() -> Dict[str, str]: - return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1} - - -@pytest.fixture -def metrics() -> List[Dict[str, str]]: - return [ - {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, - {"name": "metric_two", "unit": MetricUnit.Count, "value": 1}, - ] - - -@pytest.fixture -def metrics_same_name() -> List[Dict[str, str]]: - return [ - {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, - {"name": "metric_one", "unit": MetricUnit.Count, "value": 5}, - ] - - -@pytest.fixture -def dimension() -> Dict[str, str]: - return {"name": "test_dimension", "value": "test"} - - -@pytest.fixture -def dimensions() -> List[Dict[str, str]]: - return [ - {"name": "test_dimension", "value": "test"}, - {"name": "test_dimension_2", "value": "test"}, - ] - - -@pytest.fixture -def non_str_dimensions() -> List[Dict[str, Any]]: - return [ - {"name": "test_dimension", "value": True}, - {"name": "test_dimension_2", "value": 3}, - ] - - -@pytest.fixture -def namespace() -> str: - return "test_namespace" - - -@pytest.fixture -def service() -> str: - return "test_service" - - -@pytest.fixture -def metadata() -> Dict[str, str]: - return {"key": "username", "value": "test"} - - -@pytest.fixture -def a_hundred_metrics() -> List[Dict[str, str]]: - return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(100)] - - -@pytest.fixture -def a_hundred_metric_values() -> List[Dict[str, str]]: - return [{"name": "metric", "unit": "Count", "value": i} for i in range(100)] def serialize_metrics( @@ -1267,168 +1183,3 @@ def lambda_handler(evt, ctx): output = capture_metrics_output_multiple_emf_objects(capsys) assert len(output) == 2 - - -@pytest.fixture -def metrics_provider() -> BaseProvider: - class MetricsProvider: - def __init__(self): - self.metric_store: List = [] - self.result: str - super().__init__() - - def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): - self.metric_store.append({"name": name, "value": value, "tag": tag}) - - def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): - if raise_on_empty_metrics and len(self.metric_store) == 0: - raise SchemaValidationError("Must contain at least one metric.") - - self.result = json.dumps(self.metric_store) - - def flush(self, *args, **kwargs): - print(self.result) - - def clear(self): - self.result = "" - self.metric_store = [] - - return MetricsProvider - - -@pytest.fixture -def metrics_class() -> BaseProvider: - class MetricsClass(BaseProvider): - def __init__(self, provider): - self.provider = provider - super().__init__() - - def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): - self.provider.add_metric(name=name, value=value, tag=tag) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - self.provider.serialize_metric_set(raise_on_empty_metrics=raise_on_empty_metrics) - self.provider.flush() - self.provider.clear() - - def add_cold_start_metric(self, context: 
Any) -> None: - self.provider.add_metric(name="ColdStart", value=1, function_name=context.function_name) - - def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): - if raise_on_empty_metrics and len(self.metric_store) == 0: - raise SchemaValidationError("Must contain at least one metric.") - - self.result = self.provider.flush() - - def clear_metrics(self) -> None: - self.provider.clear_metrics() - - return MetricsClass - - -def test_metrics_provider_basic(capsys, metrics_provider, metric): - provider = metrics_provider() - provider.add_metric(**metric) - provider.serialize_metric_set() - provider.flush() - output = capture_metrics_output(capsys) - assert output[0]["name"] == metric["name"] - assert output[0]["value"] == metric["value"] - - -def test_metrics_provider_class_basic(capsys, metrics_provider, metrics_class, metric): - metrics = metrics_class(provider=metrics_provider()) - metrics.add_metric(**metric) - metrics.flush_metrics() - output = capture_metrics_output(capsys) - assert output[0]["name"] == metric["name"] - assert output[0]["value"] == metric["value"] - - -def test_metrics_provider_class_decorate(metrics_class, metrics_provider): - # GIVEN Metrics is initialized - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used to serialize metrics - @my_metrics.log_metrics - def lambda_handler(evt, context): - return True - - # THEN log_metrics should invoke the function it decorates - # and return no error if we have a namespace and dimension - assert lambda_handler({}, {}) is True - - -def test_metrics_provider_class_coldstart(capsys, metrics_provider, metrics_class): - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used with capture_cold_start_metric - @my_metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, context): - pass - - LambdaContext = namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn")) - - output = capture_metrics_output(capsys) - - # THEN ColdStart metric and function_name and service dimension should be logged - assert output[0]["name"] == "ColdStart" - - -def test_metrics_provider_class_no_coldstart(capsys, metrics_provider, metrics_class): - reset_cold_start_flag_provider() - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used with capture_cold_start_metric - @my_metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, context): - pass - - LambdaContext = namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn")) - _ = capture_metrics_output(capsys) - # drop first one - - lambda_handler({}, LambdaContext("example_fn")) - output = capture_metrics_output(capsys) - - # no coldstart is here - assert "ColdStart" not in json.dumps(output) - - -def test_metric_provider_raise_on_empty_metrics(metrics_provider, metrics_class): - # GIVEN Metrics is initialized - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics - @my_metrics.log_metrics(raise_on_empty_metrics=True) - def lambda_handler(evt, context): - pass - - # THEN the raised exception should be SchemaValidationError - # and specifically about the lack of Metrics - with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): - lambda_handler({}, {}) - - -def test_log_metrics_capture_cold_start_metric_once_with_provider_and_ephemeral(capsys, namespace, 
service): - # GIVEN Metrics is initialized - my_metrics = Metrics(service=service, namespace=namespace) - my_isolated_metrics = EphemeralMetrics(service=service, namespace=namespace) - - # WHEN log_metrics is used with capture_cold_start_metric - @my_metrics.log_metrics(capture_cold_start_metric=True) - @my_isolated_metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, context): - pass - - LambdaContext = namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn")) - - output = capture_metrics_output(capsys) - - # THEN ColdStart metric and function_name and service dimension should be logged - assert output["ColdStart"] == [1.0] - assert output["function_name"] == "example_fn" - assert output["service"] == service diff --git a/tests/functional/metrics/test_metrics_provider.py b/tests/functional/metrics/test_metrics_provider.py new file mode 100644 index 00000000000..62332df019b --- /dev/null +++ b/tests/functional/metrics/test_metrics_provider.py @@ -0,0 +1,182 @@ +import json +from collections import namedtuple +from typing import Any, List + +import pytest + +from aws_lambda_powertools.metrics import ( + EphemeralMetrics, + Metrics, + SchemaValidationError, +) +from aws_lambda_powertools.metrics.provider import BaseProvider +from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider + + +def capture_metrics_output(capsys): + return json.loads(capsys.readouterr().out.strip()) + + +@pytest.fixture +def metrics_provider() -> BaseProvider: + class MetricsProvider: + def __init__(self): + self.metric_store: List = [] + self.result: str + super().__init__() + + def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): + self.metric_store.append({"name": name, "value": value, "tag": tag}) + + def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + if raise_on_empty_metrics and len(self.metric_store) == 0: + raise SchemaValidationError("Must contain at least one metric.") + + self.result = json.dumps(self.metric_store) + + def flush(self, *args, **kwargs): + print(self.result) + + def clear(self): + self.result = "" + self.metric_store = [] + + return MetricsProvider + + +@pytest.fixture +def metrics_class() -> BaseProvider: + class MetricsClass(BaseProvider): + def __init__(self, provider): + self.provider = provider + super().__init__() + + def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): + self.provider.add_metric(name=name, value=value, tag=tag) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + self.provider.serialize_metric_set(raise_on_empty_metrics=raise_on_empty_metrics) + self.provider.flush() + self.provider.clear() + + def add_cold_start_metric(self, context: Any) -> None: + self.provider.add_metric(name="ColdStart", value=1, function_name=context.function_name) + + def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + if raise_on_empty_metrics and len(self.metric_store) == 0: + raise SchemaValidationError("Must contain at least one metric.") + + self.result = self.provider.flush() + + def clear_metrics(self) -> None: + self.provider.clear_metrics() + + return MetricsClass + + +def test_metrics_provider_basic(capsys, metrics_provider, metric): + provider = metrics_provider() + provider.add_metric(**metric) + provider.serialize_metric_set() + provider.flush() + output = capture_metrics_output(capsys) + assert output[0]["name"] == metric["name"] + 
assert output[0]["value"] == metric["value"] + + +def test_metrics_provider_class_basic(capsys, metrics_provider, metrics_class, metric): + metrics = metrics_class(provider=metrics_provider()) + metrics.add_metric(**metric) + metrics.flush_metrics() + output = capture_metrics_output(capsys) + assert output[0]["name"] == metric["name"] + assert output[0]["value"] == metric["value"] + + +def test_metrics_provider_class_decorate(metrics_class, metrics_provider): + # GIVEN Metrics is initialized + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used to serialize metrics + @my_metrics.log_metrics + def lambda_handler(evt, context): + return True + + # THEN log_metrics should invoke the function it decorates + # and return no error if we have a namespace and dimension + assert lambda_handler({}, {}) is True + + +def test_metrics_provider_class_coldstart(capsys, metrics_provider, metrics_class): + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used with capture_cold_start_metric + @my_metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, context): + pass + + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + + output = capture_metrics_output(capsys) + + # THEN ColdStart metric and function_name and service dimension should be logged + assert output[0]["name"] == "ColdStart" + + +def test_metrics_provider_class_no_coldstart(capsys, metrics_provider, metrics_class): + reset_cold_start_flag_provider() + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used with capture_cold_start_metric + @my_metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, context): + pass + + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + _ = capture_metrics_output(capsys) + # drop first one + + lambda_handler({}, LambdaContext("example_fn")) + output = capture_metrics_output(capsys) + + # no coldstart is here + assert "ColdStart" not in json.dumps(output) + + +def test_metric_provider_raise_on_empty_metrics(metrics_provider, metrics_class): + # GIVEN Metrics is initialized + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics + @my_metrics.log_metrics(raise_on_empty_metrics=True) + def lambda_handler(evt, context): + pass + + # THEN the raised exception should be SchemaValidationError + # and specifically about the lack of Metrics + with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): + lambda_handler({}, {}) + + +def test_log_metrics_capture_cold_start_metric_once_with_provider_and_ephemeral(capsys, namespace, service): + # GIVEN Metrics is initialized + my_metrics = Metrics(service=service, namespace=namespace) + my_isolated_metrics = EphemeralMetrics(service=service, namespace=namespace) + + # WHEN log_metrics is used with capture_cold_start_metric + @my_metrics.log_metrics(capture_cold_start_metric=True) + @my_isolated_metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, context): + pass + + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + + output = capture_metrics_output(capsys) + + # THEN ColdStart metric and function_name and service dimension should be logged + assert output["ColdStart"] == [1.0] + assert output["function_name"] == 
"example_fn" + assert output["service"] == service diff --git a/tests/unit/metrics/test_metrics.py b/tests/unit/metrics/test_metrics.py deleted file mode 100644 index 7fa635c6c27..00000000000 --- a/tests/unit/metrics/test_metrics.py +++ /dev/null @@ -1,8 +0,0 @@ -from aws_lambda_powertools.metrics.provider import ( - BaseProvider, -) -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider - - -def test_amazoncloudwatchemf_is_subclass_of_metricsproviderbase(): - assert issubclass(AmazonCloudWatchEMFProvider, BaseProvider) From f3fc1a2cf954a46de8968c5e1a540bca4c6fbef5 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Tue, 8 Aug 2023 17:38:18 +0100 Subject: [PATCH 07/11] Moving files --- aws_lambda_powertools/metrics/{shared.py => functions.py} | 0 .../metrics/provider/cloudwatch_emf/cloudwatch.py | 8 ++++---- 2 files changed, 4 insertions(+), 4 deletions(-) rename aws_lambda_powertools/metrics/{shared.py => functions.py} (100%) diff --git a/aws_lambda_powertools/metrics/shared.py b/aws_lambda_powertools/metrics/functions.py similarity index 100% rename from aws_lambda_powertools/metrics/shared.py rename to aws_lambda_powertools/metrics/functions.py diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index ae612aa7aee..de824a0d277 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -11,13 +11,13 @@ from aws_lambda_powertools.metrics.base import single_metric from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.provider.base import BaseProvider -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit -from aws_lambda_powertools.metrics.shared import ( +from aws_lambda_powertools.metrics.functions import ( extract_cloudwatch_metric_resolution_value, extract_cloudwatch_metric_unit_value, ) +from aws_lambda_powertools.metrics.provider.base import BaseProvider +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice From 43e4e043364537f763578f53522e9e41787a1417 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Tue, 8 Aug 2023 23:51:36 +0100 Subject: [PATCH 08/11] Cleaning code --- aws_lambda_powertools/metrics/metrics.py | 3 --- aws_lambda_powertools/metrics/provider/base.py | 3 ++- .../metrics/provider/cloudwatch_emf/cloudwatch.py | 14 +++++++------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index bcf21051635..66f6fa2c07b 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -138,9 +138,6 @@ def log_metrics( default_dimensions=default_dimensions, ) - def _add_cold_start_metric(self, context: Any) -> None: - self.provider._add_cold_start_metric(context=context) - def set_default_dimensions(self, 
**dimensions) -> None:
         self.provider.set_default_dimensions(**dimensions)
         """Persist dimensions across Lambda invocations

diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py
index 818409bfdde..d36497ded3f 100644
--- a/aws_lambda_powertools/metrics/provider/base.py
+++ b/aws_lambda_powertools/metrics/provider/base.py
@@ -209,7 +209,8 @@ def decorate(event, context):
         return decorate
 
     def _add_cold_start_metric(self, context: Any) -> None:
-        """Add cold start metric and function_name dimension
+        """
+        Check if it's cold start and add a metric if yes
 
         Parameters
         ----------
diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
index de824a0d277..fad98e4e8c4 100644
--- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
+++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
@@ -26,15 +26,16 @@
 
 
 class AmazonCloudWatchEMFProvider(BaseProvider):
-    """Base class for metric functionality (namespace, metric, dimension, serialization)
+    """
+    AmazonCloudWatchEMFProvider class (namespace, metric, dimension, serialization)
 
-    MetricManager creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF).
+    AmazonCloudWatchEMFProvider creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF).
     CloudWatch EMF can create up to 100 metrics per EMF object
-    and metrics, dimensions, and namespace created via MetricManager
+    and metrics, dimensions, and namespace created via AmazonCloudWatchEMFProvider
     will adhere to the schema, will be serialized and validated against EMF Schema.
 
-    **Use `aws_lambda_powertools.metrics.metrics.Metrics` or
-    `aws_lambda_powertools.metrics.metric.single_metric` to create EMF metrics.**
+    **Use `aws_lambda_powertools.Metrics` or
+    `aws_lambda_powertools.single_metric` to create EMF metrics.**
 
     Environment variables
     ---------------------
@@ -362,8 +363,7 @@ def handler(event, context):
         captures cold start metric, by default False
     raise_on_empty_metrics : bool, optional
         raise exception if no metrics are emitted, by default False
-    default_dimensions: Dict[str, str], optional
-        metric dimensions as key=value that will always be present
+    **kwargs
 
     Raises
     ------

From 57045bfd85977149e1a31970cb4281dbc71c7da3 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Wed, 9 Aug 2023 10:24:44 +0100
Subject: [PATCH 09/11] Addressing Heitor's feedback

---
 aws_lambda_powertools/metrics/provider/base.py | 16 +++++++---------
 .../provider/cloudwatch_emf/cloudwatch.py      | 10 ++++++++--
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py
index d36497ded3f..8bd2440658a 100644
--- a/aws_lambda_powertools/metrics/provider/base.py
+++ b/aws_lambda_powertools/metrics/provider/base.py
@@ -6,16 +6,16 @@
 from typing import Any, Callable, Dict, Optional
 
 from aws_lambda_powertools.metrics.provider import cold_start
+from aws_lambda_powertools.utilities.typing import LambdaContext
 
 logger = logging.getLogger(__name__)
 
 
 class BaseProvider(ABC):
     """
-    Class for metric provider interface.
+    Interface to create a metrics provider.
 
-    This class serves as an interface for creating your own metric provider. Inherit from this class
-    and implement the required methods to define your specific metric provider.
+ BaseProvider implements `log_metrics` decorator for every provider as a value add feature. Usage: 1. Inherit from this class. @@ -97,11 +97,11 @@ def flush_metrics(self, *args: Any, **kwargs) -> Any: raise NotImplementedError @abstractmethod - def clear_metrics(self, *args: Any, **kwargs) -> Any: + def clear_metrics(self, *args: Any, **kwargs) -> None: """ Abstract method for clear metric instance. - This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + This method must be implemented in subclasses to clear the metric instance Parameters ---------- @@ -118,7 +118,7 @@ def clear_metrics(self, *args: Any, **kwargs) -> Any: raise NotImplementedError @abstractmethod - def add_cold_start_metric(self, context: Any) -> Any: + def add_cold_start_metric(self, context: LambdaContext) -> Any: """ Abstract method for clear metric instance. @@ -196,8 +196,6 @@ def handler(event, context): @functools.wraps(lambda_handler) def decorate(event, context): try: - if default_dimensions: - self.set_default_dimensions(**default_dimensions) response = lambda_handler(event, context) if capture_cold_start_metric: self._add_cold_start_metric(context=context) @@ -210,7 +208,7 @@ def decorate(event, context): def _add_cold_start_metric(self, context: Any) -> None: """ - Check if it's cold start and add a metric if yes + Add cold start metric Parameters ---------- diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index fad98e4e8c4..e5acbd5db15 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -21,13 +21,14 @@ from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice +from aws_lambda_powertools.utilities.typing import LambdaContext logger = logging.getLogger(__name__) class AmazonCloudWatchEMFProvider(BaseProvider): """ - AmazonCloudWatchEMFProvider class (namespace, metric, dimension, serialization) + AmazonCloudWatchEMFProvider creates metrics asynchronously via CloudWatch Embedded Metric Format (EMF). AmazonCloudWatchEMFProvider creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF). 
CloudWatch EMF can create up to 100 metrics per EMF object @@ -371,6 +372,11 @@ def handler(event, context): Propagate error received """ + default_dimensions = kwargs.get("default_dimensions") + + if default_dimensions: + self.set_default_dimensions(**default_dimensions) + return super().log_metrics( lambda_handler=lambda_handler, capture_cold_start_metric=capture_cold_start_metric, @@ -378,7 +384,7 @@ def handler(event, context): **kwargs, ) - def add_cold_start_metric(self, context: Any) -> None: + def add_cold_start_metric(self, context: LambdaContext) -> None: """Add cold start metric and function_name dimension Parameters From fb2e2603d83881f83f08a683aec54016409d0fff Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 9 Aug 2023 11:26:09 +0100 Subject: [PATCH 10/11] Addressing Heitor's feedback --- aws_lambda_powertools/metrics/metrics.py | 3 +- .../provider/cloudwatch_emf/cloudwatch.py | 6 +- .../metrics/provider/cloudwatch_emf/types.py | 24 +++ .../metrics/test_metrics_cloudwatch_emf.py | 26 ++- .../metrics/test_metrics_provider.py | 199 ++++++------------ 5 files changed, 118 insertions(+), 140 deletions(-) create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf/types.py diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 66f6fa2c07b..900e0da7dd7 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -5,6 +5,7 @@ from aws_lambda_powertools.metrics.base import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.types import CloudWatchEMFOutput class Metrics: @@ -115,7 +116,7 @@ def serialize_metric_set( metrics: Dict | None = None, dimensions: Dict | None = None, metadata: Dict | None = None, - ) -> Dict: + ) -> CloudWatchEMFOutput: return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) def add_metadata(self, key: str, value: Any) -> None: diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index e5acbd5db15..ce15a4206f1 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -18,6 +18,7 @@ from aws_lambda_powertools.metrics.provider.base import BaseProvider from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.types import CloudWatchEMFOutput from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice @@ -153,7 +154,7 @@ def serialize_metric_set( metrics: Dict | None = None, dimensions: Dict | None = None, metadata: Dict | None = None, - ) -> Dict: + ) -> CloudWatchEMFOutput: """Serializes metric and dimensions set Parameters @@ -239,7 +240,8 @@ def serialize_metric_set( }, ], }, - **dimensions, # "service": "test_service" + # NOTE: Mypy doesn't recognize splats '** syntax' in TypedDict + **dimensions, # type: ignore[misc] # "service": "test_service" **metadata, # "username": "test" **metric_names_and_values, # 
"single_metric": 1.0 } diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/types.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/types.py new file mode 100644 index 00000000000..bf3a48ea13f --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/types.py @@ -0,0 +1,24 @@ +from typing import List + +from typing_extensions import NotRequired, TypedDict + + +class CloudWatchEMFMetric(TypedDict): + Name: str + Unit: str + StorageResolution: NotRequired[int] + + +class CloudWatchEMFMetrics(TypedDict): + Namespace: str + Dimensions: List[List[str]] # [ [ 'test_dimension' ] ] + Metrics: List[CloudWatchEMFMetric] + + +class CloudWatchEMFRoot(TypedDict): + Timestamp: int + CloudWatchMetrics: List[CloudWatchEMFMetrics] + + +class CloudWatchEMFOutput(TypedDict): + _aws: CloudWatchEMFRoot diff --git a/tests/functional/metrics/test_metrics_cloudwatch_emf.py b/tests/functional/metrics/test_metrics_cloudwatch_emf.py index 101c3d86883..5c4a1de1128 100644 --- a/tests/functional/metrics/test_metrics_cloudwatch_emf.py +++ b/tests/functional/metrics/test_metrics_cloudwatch_emf.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import json import warnings from collections import namedtuple @@ -16,16 +18,23 @@ SchemaValidationError, single_metric, ) -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider -from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import ( + AmazonCloudWatchEMFProvider, +) +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import ( + MAX_DIMENSIONS, +) +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.types import ( + CloudWatchEMFOutput, +) def serialize_metrics( metrics: List[Dict], dimensions: List[Dict], namespace: str, - metadatas: List[Dict] = None, -) -> Dict: + metadatas: List[Dict] | None = None, +) -> CloudWatchEMFOutput: """Helper function to build EMF object from a list of metrics, dimensions""" my_metrics = AmazonCloudWatchEMFProvider(namespace=namespace) for dimension in dimensions: @@ -42,7 +51,12 @@ def serialize_metrics( return my_metrics.serialize_metric_set() -def serialize_single_metric(metric: Dict, dimension: Dict, namespace: str, metadata: Dict = None) -> Dict: +def serialize_single_metric( + metric: Dict, + dimension: Dict, + namespace: str, + metadata: Dict | None = None, +) -> CloudWatchEMFOutput: """Helper function to build EMF object from a given metric, dimension and namespace""" my_metrics = AmazonCloudWatchEMFProvider(namespace=namespace) my_metrics.add_metric(**metric) @@ -64,7 +78,7 @@ def capture_metrics_output(capsys): return json.loads(capsys.readouterr().out.strip()) -def capture_metrics_output_multiple_emf_objects(capsys): +def capture_metrics_output_multiple_emf_objects(capsys) -> List[CloudWatchEMFOutput]: return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line] diff --git a/tests/functional/metrics/test_metrics_provider.py b/tests/functional/metrics/test_metrics_provider.py index 62332df019b..11479aea8cd 100644 --- a/tests/functional/metrics/test_metrics_provider.py +++ b/tests/functional/metrics/test_metrics_provider.py @@ -1,91 +1,103 @@ -import json -from collections import namedtuple -from typing import Any, List +from __future__ import annotations -import pytest +import json +from typing import Any, Callable, Dict, List, Optional from aws_lambda_powertools.metrics import ( 
- EphemeralMetrics, - Metrics, SchemaValidationError, ) from aws_lambda_powertools.metrics.provider import BaseProvider -from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider +from aws_lambda_powertools.utilities.typing import LambdaContext def capture_metrics_output(capsys): return json.loads(capsys.readouterr().out.strip()) -@pytest.fixture -def metrics_provider() -> BaseProvider: - class MetricsProvider: - def __init__(self): - self.metric_store: List = [] - self.result: str - super().__init__() +class FakeMetricsProvider(BaseProvider): + def __init__(self): + self.metric_store: List = [] - def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): - self.metric_store.append({"name": name, "value": value, "tag": tag}) + def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): + self.metric_store.append({"name": name, "value": value}) - def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): - if raise_on_empty_metrics and len(self.metric_store) == 0: - raise SchemaValidationError("Must contain at least one metric.") + def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + if raise_on_empty_metrics and len(self.metric_store) == 0: + raise SchemaValidationError("Must contain at least one metric.") - self.result = json.dumps(self.metric_store) + self.result = json.dumps(self.metric_store) - def flush(self, *args, **kwargs): - print(self.result) + def flush_metrics(self, *args, **kwargs): + print(json.dumps(self.metric_store)) - def clear(self): - self.result = "" - self.metric_store = [] + def clear_metrics(self): + self.metric_store = [] - return MetricsProvider + def add_cold_start_metric(self, context: LambdaContext) -> Any: + self.metric_store.append({"name": "ColdStart", "value": 1, "function_name": context.function_name}) + def log_metrics( + self, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + **kwargs, + ): + return super().log_metrics( + lambda_handler=lambda_handler, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + ) -@pytest.fixture -def metrics_class() -> BaseProvider: - class MetricsClass(BaseProvider): - def __init__(self, provider): - self.provider = provider - super().__init__() - def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): - self.provider.add_metric(name=name, value=value, tag=tag) +class FakeMetricsClass: + def __init__(self, provider: FakeMetricsProvider | None = None): + if provider is None: + self.provider = FakeMetricsProvider() + else: + self.provider = provider - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - self.provider.serialize_metric_set(raise_on_empty_metrics=raise_on_empty_metrics) - self.provider.flush() - self.provider.clear() + def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): + self.provider.add_metric(name=name, value=value, tag=tag) - def add_cold_start_metric(self, context: Any) -> None: - self.provider.add_metric(name="ColdStart", value=1, function_name=context.function_name) + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) - def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): - 
if raise_on_empty_metrics and len(self.metric_store) == 0: - raise SchemaValidationError("Must contain at least one metric.") + def add_cold_start_metric(self, context: LambdaContext) -> None: + self.provider.add_cold_start_metric(context=context) - self.result = self.provider.flush() + def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + self.provider.serialize_metric_set(raise_on_empty_metrics=raise_on_empty_metrics) - def clear_metrics(self) -> None: - self.provider.clear_metrics() + def clear_metrics(self) -> None: + self.provider.clear_metrics() - return MetricsClass + def log_metrics( + self, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + default_dimensions: Dict[str, str] | None = None, + ): + return self.provider.log_metrics( + lambda_handler=lambda_handler, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + ) -def test_metrics_provider_basic(capsys, metrics_provider, metric): - provider = metrics_provider() - provider.add_metric(**metric) - provider.serialize_metric_set() - provider.flush() +def test_metrics_class_with_default_provider(capsys, metric): + metrics = FakeMetricsClass() + metrics.add_metric(**metric) + metrics.flush_metrics() output = capture_metrics_output(capsys) assert output[0]["name"] == metric["name"] assert output[0]["value"] == metric["value"] -def test_metrics_provider_class_basic(capsys, metrics_provider, metrics_class, metric): - metrics = metrics_class(provider=metrics_provider()) +def test_metrics_class_with_custom_provider(capsys, metric): + provider = FakeMetricsProvider() + metrics = FakeMetricsClass(provider=provider) metrics.add_metric(**metric) metrics.flush_metrics() output = capture_metrics_output(capsys) @@ -93,9 +105,9 @@ def test_metrics_provider_class_basic(capsys, metrics_provider, metrics_class, m assert output[0]["value"] == metric["value"] -def test_metrics_provider_class_decorate(metrics_class, metrics_provider): +def test_metrics_provider_class_decorate(): # GIVEN Metrics is initialized - my_metrics = metrics_class(provider=metrics_provider()) + my_metrics = FakeMetricsClass() # WHEN log_metrics is used to serialize metrics @my_metrics.log_metrics @@ -105,78 +117,3 @@ def lambda_handler(evt, context): # THEN log_metrics should invoke the function it decorates # and return no error if we have a namespace and dimension assert lambda_handler({}, {}) is True - - -def test_metrics_provider_class_coldstart(capsys, metrics_provider, metrics_class): - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used with capture_cold_start_metric - @my_metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, context): - pass - - LambdaContext = namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn")) - - output = capture_metrics_output(capsys) - - # THEN ColdStart metric and function_name and service dimension should be logged - assert output[0]["name"] == "ColdStart" - - -def test_metrics_provider_class_no_coldstart(capsys, metrics_provider, metrics_class): - reset_cold_start_flag_provider() - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used with capture_cold_start_metric - @my_metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, context): - pass - - LambdaContext = 
namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn")) - _ = capture_metrics_output(capsys) - # drop first one - - lambda_handler({}, LambdaContext("example_fn")) - output = capture_metrics_output(capsys) - - # no coldstart is here - assert "ColdStart" not in json.dumps(output) - - -def test_metric_provider_raise_on_empty_metrics(metrics_provider, metrics_class): - # GIVEN Metrics is initialized - my_metrics = metrics_class(provider=metrics_provider()) - - # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics - @my_metrics.log_metrics(raise_on_empty_metrics=True) - def lambda_handler(evt, context): - pass - - # THEN the raised exception should be SchemaValidationError - # and specifically about the lack of Metrics - with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): - lambda_handler({}, {}) - - -def test_log_metrics_capture_cold_start_metric_once_with_provider_and_ephemeral(capsys, namespace, service): - # GIVEN Metrics is initialized - my_metrics = Metrics(service=service, namespace=namespace) - my_isolated_metrics = EphemeralMetrics(service=service, namespace=namespace) - - # WHEN log_metrics is used with capture_cold_start_metric - @my_metrics.log_metrics(capture_cold_start_metric=True) - @my_isolated_metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, context): - pass - - LambdaContext = namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn")) - - output = capture_metrics_output(capsys) - - # THEN ColdStart metric and function_name and service dimension should be logged - assert output["ColdStart"] == [1.0] - assert output["function_name"] == "example_fn" - assert output["service"] == service From 600307f6039998556c931b084e876ce3ef2734d4 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 9 Aug 2023 12:49:49 +0100 Subject: [PATCH 11/11] Addressing Heitor's feedback --- .../provider/cloudwatch_emf/cloudwatch.py | 1 - .../metrics/test_metrics_provider.py | 67 ++----------------- 2 files changed, 5 insertions(+), 63 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index ce15a4206f1..16be60112c3 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -31,7 +31,6 @@ class AmazonCloudWatchEMFProvider(BaseProvider): """ AmazonCloudWatchEMFProvider creates metrics asynchronously via CloudWatch Embedded Metric Format (EMF). - AmazonCloudWatchEMFProvider creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF). CloudWatch EMF can create up to 100 metrics per EMF object and metrics, dimensions, and namespace created via AmazonCloudWatchEMFProvider will adhere to the schema, will be serialized and validated against EMF Schema. 
diff --git a/tests/functional/metrics/test_metrics_provider.py b/tests/functional/metrics/test_metrics_provider.py index 11479aea8cd..2ed84a23a21 100644 --- a/tests/functional/metrics/test_metrics_provider.py +++ b/tests/functional/metrics/test_metrics_provider.py @@ -1,11 +1,12 @@ from __future__ import annotations import json -from typing import Any, Callable, Dict, List, Optional +from typing import Any, List from aws_lambda_powertools.metrics import ( SchemaValidationError, ) +from aws_lambda_powertools.metrics.metrics import Metrics from aws_lambda_powertools.metrics.provider import BaseProvider from aws_lambda_powertools.utilities.typing import LambdaContext @@ -31,73 +32,15 @@ def flush_metrics(self, *args, **kwargs): print(json.dumps(self.metric_store)) def clear_metrics(self): - self.metric_store = [] + self.metric_store.clear() def add_cold_start_metric(self, context: LambdaContext) -> Any: self.metric_store.append({"name": "ColdStart", "value": 1, "function_name": context.function_name}) - def log_metrics( - self, - lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, - capture_cold_start_metric: bool = False, - raise_on_empty_metrics: bool = False, - **kwargs, - ): - return super().log_metrics( - lambda_handler=lambda_handler, - capture_cold_start_metric=capture_cold_start_metric, - raise_on_empty_metrics=raise_on_empty_metrics, - ) - - -class FakeMetricsClass: - def __init__(self, provider: FakeMetricsProvider | None = None): - if provider is None: - self.provider = FakeMetricsProvider() - else: - self.provider = provider - - def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): - self.provider.add_metric(name=name, value=value, tag=tag) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) - - def add_cold_start_metric(self, context: LambdaContext) -> None: - self.provider.add_cold_start_metric(context=context) - - def serialize_metric_set(self, raise_on_empty_metrics: bool = False, *args, **kwargs): - self.provider.serialize_metric_set(raise_on_empty_metrics=raise_on_empty_metrics) - - def clear_metrics(self) -> None: - self.provider.clear_metrics() - - def log_metrics( - self, - lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, - capture_cold_start_metric: bool = False, - raise_on_empty_metrics: bool = False, - default_dimensions: Dict[str, str] | None = None, - ): - return self.provider.log_metrics( - lambda_handler=lambda_handler, - capture_cold_start_metric=capture_cold_start_metric, - raise_on_empty_metrics=raise_on_empty_metrics, - ) - - -def test_metrics_class_with_default_provider(capsys, metric): - metrics = FakeMetricsClass() - metrics.add_metric(**metric) - metrics.flush_metrics() - output = capture_metrics_output(capsys) - assert output[0]["name"] == metric["name"] - assert output[0]["value"] == metric["value"] - def test_metrics_class_with_custom_provider(capsys, metric): provider = FakeMetricsProvider() - metrics = FakeMetricsClass(provider=provider) + metrics = Metrics(provider=provider) metrics.add_metric(**metric) metrics.flush_metrics() output = capture_metrics_output(capsys) @@ -107,7 +50,7 @@ def test_metrics_class_with_custom_provider(capsys, metric): def test_metrics_provider_class_decorate(): # GIVEN Metrics is initialized - my_metrics = FakeMetricsClass() + my_metrics = Metrics() # WHEN log_metrics is used to serialize 
metrics
    @my_metrics.log_metrics
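Taken together, the series leaves the end-user API untouched: `Metrics.log_metrics` still accepts `default_dimensions`, which now travels through `**kwargs` into `AmazonCloudWatchEMFProvider.log_metrics`, where `set_default_dimensions()` is applied before delegating to `BaseProvider` (PATCH 09). A usage sketch of that unchanged contract — the handler, namespace, and dimension values are illustrative only:

    from aws_lambda_powertools import Metrics

    metrics = Metrics(namespace="ServerlessAirline", service="payment")

    # default_dimensions is forwarded via **kwargs to the EMF provider, which
    # calls set_default_dimensions() before the wrapped handler runs
    @metrics.log_metrics(capture_cold_start_metric=True, default_dimensions={"environment": "demo"})
    def lambda_handler(event, context):
        metrics.add_metric(name="SuccessfulBooking", unit="Count", value=1)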