diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index a0e24f1dcf9..2b97df75c1f 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -1,10 +1,12 @@
 import functools
 import inspect
+import io
 import logging
 import os
 import random
 import sys
-from typing import IO, Any, Callable, Dict, Iterable, Optional, TypeVar, Union
+import traceback
+from typing import IO, Any, Callable, Dict, Iterable, Mapping, Optional, TypeVar, Union
 
 import jmespath
 
@@ -235,6 +237,9 @@ def _init_logger(self, **kwargs):
         self._logger.addHandler(self.logger_handler)
         self.structure_logs(**kwargs)
 
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        self._logger.findCaller = self.findCaller
+
         # Pytest Live Log feature duplicates log records for colored output
         # but we explicitly add a filter for log deduplication.
         # This flag disables this protection when you explicit want logs to be duplicated (#262)
@@ -359,6 +364,126 @@ def decorate(event, context, *args, **kwargs):
 
             return decorate
 
+    def info(
+        self,
+        msg: object,
+        *args,
+        exc_info=None,
+        stack_info: bool = False,
+        stacklevel: int = 2,
+        extra: Optional[Mapping[str, object]] = None,
+        **kwargs,
+    ):
+        extra = extra or {}
+        extra = {**extra, **kwargs}
+
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        if sys.version_info < (3, 8):  # pragma: no cover
+            return self._logger.info(msg, *args, exc_info=exc_info, stack_info=stack_info, extra=extra)
+        return self._logger.info(
+            msg, *args, exc_info=exc_info, stack_info=stack_info, stacklevel=stacklevel, extra=extra
+        )
+
+    def error(
+        self,
+        msg: object,
+        *args,
+        exc_info=None,
+        stack_info: bool = False,
+        stacklevel: int = 2,
+        extra: Optional[Mapping[str, object]] = None,
+        **kwargs,
+    ):
+        extra = extra or {}
+        extra = {**extra, **kwargs}
+
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        if sys.version_info < (3, 8):  # pragma: no cover
+            return self._logger.error(msg, *args, exc_info=exc_info, stack_info=stack_info, extra=extra)
+        return self._logger.error(
+            msg, *args, exc_info=exc_info, stack_info=stack_info, stacklevel=stacklevel, extra=extra
+        )
+
+    def exception(
+        self,
+        msg: object,
+        *args,
+        exc_info=True,
+        stack_info: bool = False,
+        stacklevel: int = 2,
+        extra: Optional[Mapping[str, object]] = None,
+        **kwargs,
+    ):
+        extra = extra or {}
+        extra = {**extra, **kwargs}
+
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        if sys.version_info < (3, 8):  # pragma: no cover
+            return self._logger.exception(msg, *args, exc_info=exc_info, stack_info=stack_info, extra=extra)
+        return self._logger.exception(
+            msg, *args, exc_info=exc_info, stack_info=stack_info, stacklevel=stacklevel, extra=extra
+        )
+
+    def critical(
+        self,
+        msg: object,
+        *args,
+        exc_info=None,
+        stack_info: bool = False,
+        stacklevel: int = 2,
+        extra: Optional[Mapping[str, object]] = None,
+        **kwargs,
+    ):
+        extra = extra or {}
+        extra = {**extra, **kwargs}
+
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        if sys.version_info < (3, 8):  # pragma: no cover
+            return self._logger.critical(msg, *args, exc_info=exc_info, stack_info=stack_info, extra=extra)
+        return self._logger.critical(
+            msg, *args, exc_info=exc_info, stack_info=stack_info, stacklevel=stacklevel, extra=extra
+        )
+
+    def warning(
+        self,
+        msg: object,
+        *args,
+        exc_info=None,
+        stack_info: bool = False,
+        stacklevel: int = 2,
+        extra: Optional[Mapping[str, object]] = None,
+        **kwargs,
+    ):
+        extra = extra or {}
+        extra = {**extra, **kwargs}
+
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        if sys.version_info < (3, 8):  # pragma: no cover
+            return self._logger.warning(msg, *args, exc_info=exc_info, stack_info=stack_info, extra=extra)
+        return self._logger.warning(
+            msg, *args, exc_info=exc_info, stack_info=stack_info, stacklevel=stacklevel, extra=extra
+        )
+
+    def debug(
+        self,
+        msg: object,
+        *args,
+        exc_info=None,
+        stack_info: bool = False,
+        stacklevel: int = 2,
+        extra: Optional[Mapping[str, object]] = None,
+        **kwargs,
+    ):
+        extra = extra or {}
+        extra = {**extra, **kwargs}
+
+        # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+        if sys.version_info < (3, 8):  # pragma: no cover
+            return self._logger.debug(msg, *args, exc_info=exc_info, stack_info=stack_info, extra=extra)
+        return self._logger.debug(
+            msg, *args, exc_info=exc_info, stack_info=stack_info, stacklevel=stacklevel, extra=extra
+        )
+
     def append_keys(self, **additional_keys):
         self.registered_formatter.append_keys(**additional_keys)
 
@@ -462,6 +587,41 @@ def _get_caller_filename():
         caller_frame = frame.f_back.f_back.f_back
         return caller_frame.f_globals["__name__"]
 
+    # Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+    def findCaller(self, stack_info=False, stacklevel=2):  # pragma: no cover
+        """
+        Find the stack frame of the caller so that we can note the source
+        file name, line number and function name.
+        """
+        f = logging.currentframe()  # noqa: VNE001
+        # On some versions of IronPython, currentframe() returns None if
+        # IronPython isn't run with -X:Frames.
+        if f is None:
+            return "(unknown file)", 0, "(unknown function)", None
+        while stacklevel > 0:
+            next_f = f.f_back
+            if next_f is None:
+                ## We've got options here.
+                ## If we want to use the last (deepest) frame:
+                break
+                ## If we want to mimic the warnings module:
+                # return ("sys", 1, "(unknown function)", None)  # noqa: E800
+                ## If we want to be pedantic:  # noqa: E800
+                # raise ValueError("call stack is not deep enough")  # noqa: E800
+            f = next_f  # noqa: VNE001
+            if not _is_internal_frame(f):
+                stacklevel -= 1
+        co = f.f_code
+        sinfo = None
+        if stack_info:
+            with io.StringIO() as sio:
+                sio.write("Stack (most recent call last):\n")
+                traceback.print_stack(f, file=sio)
+                sinfo = sio.getvalue()
+                if sinfo[-1] == "\n":
+                    sinfo = sinfo[:-1]
+        return co.co_filename, f.f_lineno, co.co_name, sinfo
+
 
 def set_package_logger(
     level: Union[str, int] = logging.DEBUG,
@@ -500,3 +660,13 @@ def set_package_logger(
     handler = logging.StreamHandler(stream)
     handler.setFormatter(formatter)
     logger.addHandler(handler)
+
+
+# Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
+# The following is based on warnings._is_internal_frame. It makes sure that
+# frames of the import mechanism are skipped when logging at module level and
+# using a stacklevel value greater than one.
+def _is_internal_frame(frame):  # pragma: no cover
+    """Signal whether the frame is a CPython or logging module internal."""
+    filename = os.path.normcase(frame.f_code.co_filename)
+    return filename == logging._srcfile or ("importlib" in filename and "_bootstrap" in filename)
diff --git a/docs/core/logger.md b/docs/core/logger.md
index f98962a0f5f..8fc9da441ac 100644
--- a/docs/core/logger.md
+++ b/docs/core/logger.md
@@ -158,7 +158,7 @@ To ease routine tasks like extracting correlation ID from popular event sources,
 You can append additional keys using either mechanism:
 
 * Persist new keys across all future log messages via `append_keys` method
-* Add additional keys on a per log message basis via `extra` parameter
+* Add additional keys on a per log message basis via keyword arguments (`key=value`), or via `extra` parameter
 
 #### append_keys method
 
@@ -184,14 +184,33 @@ You can append your own keys to your existing Logger via `append_keys(**additional
 
 This example will add `order_id` if its value is not empty, and in subsequent invocations where `order_id` might not be present it'll remove it from the Logger.
 
+#### ephemeral metadata
+
+You can pass an arbitrary number of keyword arguments (kwargs) to all log levels' methods, e.g. `logger.info, logger.warning`.
+
+Two common use cases for this feature are to enrich log statements with additional metadata, or to add certain keys conditionally.
+
+!!! info "Any keyword argument added will not be persisted in subsequent messages."
+
+=== "append_keys_kwargs.py"
+
+    ```python hl_lines="8"
+    --8<-- "examples/logger/src/append_keys_kwargs.py"
+    ```
+
+=== "append_keys_kwargs_output.json"
+
+    ```json hl_lines="7"
+    --8<-- "examples/logger/src/append_keys_kwargs_output.json"
+    ```
+
 #### extra parameter
 
 Extra parameter is available for all log levels' methods, as implemented in the standard logging library - e.g. `logger.info, logger.warning`.
 
 It accepts any dictionary, and all keyword arguments will be added as part of the root structure of the logs for that log statement.
 
-???+ info
-    Any keyword argument added using `extra` will not be persisted for subsequent messages.
+!!! info "Any keyword argument added using `extra` will not be persisted in subsequent messages."
 
=== "append_keys_extra.py" diff --git a/examples/logger/src/append_keys_kwargs.py b/examples/logger/src/append_keys_kwargs.py new file mode 100644 index 00000000000..5885c7e2bd6 --- /dev/null +++ b/examples/logger/src/append_keys_kwargs.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +def handler(event: dict, context: LambdaContext) -> str: + logger.info("Collecting payment", request_id="1123") + + return "hello world" diff --git a/examples/logger/src/append_keys_kwargs_output.json b/examples/logger/src/append_keys_kwargs_output.json new file mode 100644 index 00000000000..cd888e5e2af --- /dev/null +++ b/examples/logger/src/append_keys_kwargs_output.json @@ -0,0 +1,8 @@ +{ + "level": "INFO", + "location": "collect.handler:8", + "message": "Collecting payment", + "timestamp": "2022-11-26 11:47:12,494+0200", + "service": "payment", + "request_id": "1123" +} \ No newline at end of file diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py index 7eb3018ef64..4876da5cacf 100644 --- a/tests/functional/test_logger.py +++ b/tests/functional/test_logger.py @@ -415,6 +415,25 @@ def test_logger_extra_kwargs(stdout, service_name): assert "request_id" not in no_extra_fields_log +def test_logger_arbitrary_fields_as_kwargs(stdout, service_name): + # GIVEN Logger is initialized + logger = Logger(service=service_name, stream=stdout) + + # WHEN `request_id` is an arbitrary field in a log message to the existing structured log + fields = {"request_id": "blah"} + + logger.info("with arbitrary fields", **fields) + logger.info("without extra fields") + + extra_fields_log, no_extra_fields_log = capture_multiple_logging_statements_output(stdout) + + # THEN first log should have request_id field in the root structure + assert "request_id" in extra_fields_log + + # THEN second log should not have request_id in the root structure + assert "request_id" not in no_extra_fields_log + + def test_logger_log_twice_when_log_filter_isnt_present_and_root_logger_is_setup(monkeypatch, stdout, service_name): # GIVEN Lambda configures the root logger with a handler root_logger = logging.getLogger()