From b5f2ed38c93ac7c60de1743df22283bfaed29fcc Mon Sep 17 00:00:00 2001
From: FlorianVeaux
Date: Wed, 23 Dec 2020 17:46:44 +0100
Subject: [PATCH] Make 'Context limit reached' message a warning

---
 .../datadog_checks/kafka_consumer/kafka_consumer.py | 2 +-
 .../datadog_checks/kafka_consumer/legacy_0_10_2.py  | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py b/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
index 665c16cece2e0..0af8a4805cbd6 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
+++ b/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
@@ -106,7 +106,7 @@ def check(self, instance):
             if len(self._consumer_offsets) < self._context_limit:
                 self._get_highwater_offsets()
             else:
-                self.log.debug("Context limit reached. Skipping highwater offset collection.")
+                self.warning("Context limit reached. Skipping highwater offset collection.")
         except Exception:
             self.log.exception("There was a problem collecting the highwater mark offsets.")
             # Unlike consumer offsets, fail immediately because we can't calculate consumer lag w/o highwater_offsets
diff --git a/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py b/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py
index 7a20627897395..43416712316cd 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py
+++ b/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py
@@ -200,7 +200,7 @@ def _get_highwater_offsets(self, contexts_limit):
 
         for broker in self._kafka_client.cluster.brokers():
             if len(self._highwater_offsets) >= contexts_limit:
-                self.log.debug("Context limit reached. Skipping highwater offsets collection.")
+                self.warning("Context limit reached. Skipping highwater offsets collection.")
                 return
             broker_led_partitions = self._kafka_client.cluster.partitions_for_broker(broker.nodeId)
             if broker_led_partitions is None:
@@ -237,7 +237,7 @@ def _process_highwater_offsets(self, response, contexts_limit):
             if error_type is kafka_errors.NoError:
                 self._highwater_offsets[(topic, partition)] = offsets[0]
                 if len(self._highwater_offsets) >= contexts_limit:
-                    self.log.debug("Context limit reached. Skipping highwater offsets processing.")
+                    self.warning("Context limit reached. Skipping highwater offsets processing.")
                     return
             elif error_type is kafka_errors.NotLeaderForPartitionError:
                 self.log.warning(
@@ -374,7 +374,7 @@ def _get_zk_consumer_offsets(self, contexts_limit):
                         self._zk_consumer_offsets[key] = consumer_offset
 
                         if len(self._zk_consumer_offsets) >= contexts_limit:
-                            self.log.debug("Context limit reached. Skipping zk consumer offsets collection.")
+                            self.warning("Context limit reached. Skipping zk consumer offsets collection.")
                             return
             except NoNodeError:
                 self.log.info('No zookeeper node at %s', zk_path)
@@ -424,7 +424,7 @@ def _get_kafka_consumer_offsets(self, contexts_limit):
                     self._kafka_consumer_offsets[key] = offset
 
                     if len(self._kafka_consumer_offsets) >= contexts_limit:
-                        self.log.debug("Context limit reached. Skipping kafka consumer offsets collection.")
+                        self.warning("Context limit reached. Skipping kafka consumer offsets collection.")
                         return
                 else:
                     self.log.info("unable to find group coordinator for %s", consumer_group)
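
Note on the change above: AgentCheck.warning() in datadog_checks_base logs
the message at WARNING level and also records it on the check, so it is
surfaced in the Agent's status output; self.log.debug() only writes to the
Agent log, and only when debug logging is enabled. What follows is a minimal
sketch of that difference, assuming the datadog_checks_base package is
installed; the check class, the CONTEXT_LIMIT value, and the stand-in offset
collection are hypothetical and not part of this patch.

    from datadog_checks.base import AgentCheck


    class SketchCheck(AgentCheck):
        """Hypothetical check used only to illustrate the logging change."""

        CONTEXT_LIMIT = 350  # hypothetical limit; the real check reads it from config

        def check(self, instance):
            # Stand-in for the real consumer-offset collection.
            offsets = {('some_topic', partition): 0 for partition in range(400)}

            if len(offsets) >= self.CONTEXT_LIMIT:
                # Before this patch: visible only in the Agent log, and only
                # when debug logging is enabled.
                self.log.debug("Context limit reached. Skipping highwater offset collection.")
                # After this patch: logged at WARNING level and attached to the
                # check's status, so operators see it in `agent status`.
                self.warning("Context limit reached. Skipping highwater offset collection.")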