diff --git a/kafka_consumer/datadog_checks/kafka_consumer/new_kafka_consumer.py b/kafka_consumer/datadog_checks/kafka_consumer/new_kafka_consumer.py
index 7a11134431b6f..4f92be90cc0ce 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/new_kafka_consumer.py
+++ b/kafka_consumer/datadog_checks/kafka_consumer/new_kafka_consumer.py
@@ -284,8 +284,8 @@ def _report_consumer_offsets_and_lag(self, contexts_limit):
             if reported_contexts >= contexts_limit:
                 continue
             timestamps = self._broker_timestamps["{}_{}".format(topic, partition)]
-            # producer_timestamp is set in the same check, so it should never be None
-            producer_timestamp = timestamps[producer_offset]
+            # The producer timestamp may not be set if there was an error fetching broker offsets.
+            producer_timestamp = timestamps.get(producer_offset, None)
             consumer_timestamp = self._get_interpolated_timestamp(timestamps, consumer_offset)
             if consumer_timestamp is None or producer_timestamp is None:
                 continue
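
For context, a minimal standalone sketch of the failure mode this change guards against: if a broker offset fetch errored out, the producer offset may be absent from the timestamp map, and direct indexing would raise KeyError instead of letting the existing None check skip the context. The offsets and timestamps below are hypothetical and not taken from the check itself.

# Hypothetical broker timestamp map: offset -> timestamp (seconds).
broker_timestamps = {100: 1690000000.0, 105: 1690000005.0}

# Offset that was never recorded because fetching broker offsets failed (assumed value).
producer_offset = 110

# Old behavior: broker_timestamps[producer_offset] raises KeyError here.
# New behavior: .get() returns None, so the guard below can skip the context.
producer_timestamp = broker_timestamps.get(producer_offset, None)

if producer_timestamp is None:
    print("skipping context: no broker timestamp for offset", producer_offset)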