From 9a81d3c295a80492ca8dcea3488d64ae41e584c5 Mon Sep 17 00:00:00 2001
From: gsalami00
Date: Mon, 30 Nov 2020 17:19:41 -0500
Subject: [PATCH 1/4] Add Kafka Consumer spec

---
 kafka_consumer/assets/configuration/spec.yaml | 246 ++++++++++++++++++
 .../kafka_consumer/data/conf.yaml.example     |  78 +++---
 kafka_consumer/manifest.json                  |   5 ++++-
 3 files changed, 293 insertions(+), 36 deletions(-)
 create mode 100644 kafka_consumer/assets/configuration/spec.yaml

diff --git a/kafka_consumer/assets/configuration/spec.yaml b/kafka_consumer/assets/configuration/spec.yaml
new file mode 100644
index 0000000000000..3bf951ef4f19a
--- /dev/null
+++ b/kafka_consumer/assets/configuration/spec.yaml
@@ -0,0 +1,246 @@
+name: Kafka Consumer
+files:
+- name: kafka_consumer.yaml
+  options:
+  - name: init_config
+    description: |
+      WARNING: To avoid blindly collecting offsets and lag for an unbounded number
+      of partitions (as could be the case after enabling monitor_unlisted_consumer_groups
+      or monitor_all_broker_highwatermarks) the check collects metrics for at most 500 partitions.
+
+      DEPRECATION NOTICE: In the early days of Kafka, consumer offsets were stored in Zookeeper.
+      So this check currently supports fetching consumer offsets from both Kafka and Zookeeper.
+      However, Kafka 0.9 (released in 2015) deprecated Zookeeper storage. As a result, we have also
+      deprecated fetching consumer offsets from Zookeeper and at some point in the future that
+      functionality will be removed from this check.
+    options:
+    - name: kafka_timeout
+      description: Customizes the Kafka connection timeout.
+      value:
+        type: integer
+        example: 5
+        default: 5
+    - name: zk_timeout
+      description: |
+        DEPRECATED: Customizes the ZooKeeper connection timeout.
+      value:
+        type: integer
+        example: 5
+        default: 5
+  - name: instances
+    description: |
+
+    options:
+    - name: kafka_connect_str
+      description: |
+        Kafka endpoints and port to connect to.
+
+        In a production environment, it's often useful to specify multiple
+        Kafka nodes for a single check instance. This way you
+        only generate a single check process, but if one host goes down,
+        KafkaClient tries contacting the next host.
+        Details: https://github.com/DataDog/dd-agent/issues/2943
+      required: True
+      value:
+        type: array
+        items:
+          type: string
+        example:
+          - localhost:9092
+          - <HOST>:9092
+    - name: kafka_client_api_version
+      description: |
+        Specify the highest client protocol version supported by all brokers in the cluster.
+
+        This is a performance optimization. If this is not set, then the check automatically probes
+        the cluster for broker version during the connection bootstrapping process. Explicitly setting
+        this bypasses that probe, saving 3-5 network calls depending on the broker version. Note that
+        probing randomly picks a broker to probe, so in a mixed-version cluster, probing returns a
+        non-deterministic result.
+      value:
+        type: string
+        example: "2.3.0"
+    - name: consumer_groups
+      description: |
+        Each level is optional. Any empty values are fetched from the Kafka cluster.
+        You can have empty partitions (example: <CONSUMER_NAME_2>), topics (example: <CONSUMER_NAME_3>),
+        and even consumer_groups. If you omit consumer_groups, you must set `monitor_unlisted_consumer_groups` to true.
+
+        Deprecation notice: Omitting various levels works for zookeeper-based consumers. However, all
+        functionality related to fetching offsets from Zookeeper is deprecated and will be removed from this
+        check in the future.
+      value:
+        type: object
+        example:
+          <CONSUMER_NAME_1>:
+            <TOPIC_NAME_1>: [0, 1, 4, 12]
+          <CONSUMER_NAME_2>:
+            <TOPIC_NAME_2>: []
+          <CONSUMER_NAME_3>: {}
+    - name: monitor_unlisted_consumer_groups
+      description: |
+        Setting monitor_unlisted_consumer_groups to `true` tells the check to discover all consumer groups
+        and fetch all their known offsets. If this is not set to true, you must specify consumer_groups.
+
+        WARNING: This feature requires that your Kafka brokers be version >= 0.10.2. It is impossible to
+        support this feature on older brokers because they do not provide a way to determine the mapping
+        of consumer groups to topics. For details, see KIP-88. For older Kafka brokers, the consumer groups
+        must be specified. This requirement only applies to the brokers, not the consumers--they can be any version.
+
+        Deprecation notice: This feature works for Zookeeper-based consumers. However, all
+        functionality related to fetching offsets from Zookeeper is deprecated and will be removed from this
+        check in the future.
+      value:
+        type: boolean
+        example: false
+    - name: monitor_all_broker_highwatermarks
+      description: |
+        Setting monitor_all_broker_highwatermarks to `true` tells the check to
+        discover and fetch the broker highwater mark offsets for all kafka topics in
+        the cluster. Otherwise highwater mark offsets will only be fetched for topic
+        partitions where that check run has already fetched a consumer offset. Internal
+        Kafka topics like __consumer_offsets, __transaction_state, etc are always excluded.
+      value:
+        type: boolean
+        example: false
+    - name: tags
+      description: |
+        List of tags to attach to every metric and service check emitted by this integration.
+
+        Learn more about tagging at https://docs.datadoghq.com/tagging
+      value:
+        type: array
+        items:
+          type: string
+        example:
+          - <KEY_1>:<VALUE_1>
+          - <KEY_2>:<VALUE_2>
+    - name: security_protocol
+      description: |
+        Protocol used to communicate with brokers.
+        Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
+        Default: PLAINTEXT.
+      default: PLAINTEXT
+      value:
+        type: string
+        example: PLAINTEXT
+    - name: sasl_mechanism
+      description: |
+        String picking sasl mechanism when security_protocol is SASL_PLAINTEXT or SASL_SSL.
+        Valid values are: PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
+      value:
+        type: string
+        example: PLAIN
+    - name: sasl_plain_username
+      description: Username for sasl PLAIN or SCRAM authentication.
+      value:
+        type: string
+        example: <USERNAME>
+    - name: sasl_plain_password
+      description: Password for sasl PLAIN or SCRAM authentication.
+      value:
+        type: string
+        example: <PASSWORD>
+    - name: sasl_kerberos_service_name
+      description: Service name to include in GSSAPI sasl mechanism handshake.
+      value:
+        type: string
+        example: kafka
+    - name: sasl_kerberos_domain_name
+      description: Kerberos domain name to use in GSSAPI sasl mechanism handshake.
+      default: one of the bootstrap servers
+      value:
+        type: string
+        example: localhost
+    - name: ssl_context
+      description: |
+        Pre-configured SSLContext for wrapping socket connections.
+        If provided, all other ssl_* configurations are ignored.
+      value:
+        type: string
+        example: <SSL_CONTEXT>
+    - name: ssl_check_hostname
+      description: |
+        Flag to configure whether SSL handshake should verify that the
+        certificate matches the broker’s hostname.
+      default: true
+      value:
+        type: boolean
+        example: true
+    - name: ssl_cafile
+      description: Filename of CA file path to use in certificate verification.
+      value:
+        type: string
+        example: <CA_FILE_PATH>
+    - name: ssl_certfile
+      description: |
+        Filename path of file in PEM format containing the client certificate,
+        as well as any CA certificates needed to establish the certificate’s authenticity.
+      value:
+        type: string
+        example: <CERT_FILE_PATH>
+    - name: ssl_keyfile
+      description: Optional filename containing the client private key.
+      value:
+        type: string
+        example: <KEY_FILE_PATH>
+    - name: ssl_password
+      description: Password to be used when loading the certificate chain.
+      value:
+        type: string
+        example: <PASSWORD>
+    - name: ssl_crlfile
+      description: |
+        Filename path containing the CRL to check for certificate expiration.
+        By default, no CRL check is done. When providing a file, only the leaf certificate
+        will be checked against this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
+      value:
+        type: string
+        example: <CRL_FILE_PATH>
+    - name: broker_requests_batch_size
+      description: |
+        The OffsetRequests sent to each broker are batched by the kafka_consumer check in groups of 30 by default.
+        If the batch size is too big, you may see KafkaTimeoutError exceptions in the logs while
+        running the wakeup calls.
+        If the batch size is too small, the check will take longer to run.
+      default: 30
+      value:
+        type: integer
+        example: 30
+    - name: zk_connect_str
+      description: |
+        DEPRECATION NOTICE: This option is only used for fetching consumer offsets
+        from Zookeeper and is deprecated.
+        Zookeeper endpoints and port to connect to.
+        In a production environment, it's often useful to specify multiple
+        Zookeeper nodes for a single check instance. This way you
+        only generate a single check process, but if one host goes down,
+        KafkaClient / KazooClient tries contacting the next host.
+        Details: https://github.com/DataDog/dd-agent/issues/2943
+      value:
+        type: array
+        items:
+          type: object
+        example:
+          - localhost:2181
+          - <HOST>:2181
+    - name: zk_prefix
+      description: |
+        DEPRECATION NOTICE: This option is only used for fetching consumer offsets
+        from Zookeeper and is deprecated.
+        Zookeeper chroot prefix under which Kafka data lives in Zookeeper.
+        If kafka is connecting to `my-zookeeper:2181/kafka` then the `zk_prefix` is `/kafka`.
+      value:
+        type: string
+        example: <ZK_PREFIX>
+    - name: kafka_consumer_offsets
+      description: |
+        DEPRECATION NOTICE: This option is only used for fetching consumer offsets
+        from Zookeeper and is deprecated.
+        This setting only applies if zk_connect_str is set.
+        Set to true to fetch consumer offsets from both Zookeeper and Kafka
+        Set to false to fetch consumer offsets only from Zookeeper.
+      value:
+        type: boolean
+        example: false
+        default: false
\ No newline at end of file
diff --git a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
index 2e2e0654510b4..4e193a9fdb2d1 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
+++ b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
@@ -1,25 +1,26 @@
 ## WARNING: To avoid blindly collecting offsets and lag for an unbounded number
 ## of partitions (as could be the case after enabling monitor_unlisted_consumer_groups
 ## or monitor_all_broker_highwatermarks) the check collects metrics for at most 500 partitions.
-
+##
 ## DEPRECATION NOTICE: In the early days of Kafka, consumer offsets were stored in Zookeeper.
 ## So this check currently supports fetching consumer offsets from both Kafka and Zookeeper.
 ## However, Kafka 0.9 (released in 2015) deprecated Zookeeper storage. As a result, we have also
 ## deprecated fetching consumer offsets from Zookeeper and at some point in the future that
 ## functionality will be removed from this check.
-
+#
 init_config:
 
-  ## @param kafka_timeout - integer - optional - default: 5
-  ## Customizes the Kafka connection timeout.
-  #
-  # kafka_timeout: 5
+    ## @param kafka_timeout - integer - optional - default: 5
+    ## Customizes the Kafka connection timeout.
+    #
+    # kafka_timeout: 5
 
-  ## @param zk_timeout - integer - optional - default: 5
-  ## DEPRECATED: Customizes the ZooKeeper connection timeout.
-  #
-  # zk_timeout: 5
+    ## @param zk_timeout - integer - optional - default: 5
+    ## DEPRECATED: Customizes the ZooKeeper connection timeout.
+    #
+    # zk_timeout: 5
 
+#
 instances:
 
     ## @param kafka_connect_str - list of strings - required
@@ -31,11 +32,11 @@ instances:
     ## KafkaClient tries contacting the next host.
     ## Details: https://github.com/DataDog/dd-agent/issues/2943
     #
-  - kafka_connect_str:
-    - localhost:9092
-    # - <HOST>:9092
+  kafka_connect_str:
+      - localhost:9092
+      - <HOST>:9092
 
-    ## @param kafka_client_api_version - string - optional
+    ## @param kafka_client_api_version - string - optional - default: 2.3.0
     ## Specify the highest client protocol version supported by all brokers in the cluster.
     ##
     ## This is a performance optimization. If this is not set, then the check automatically probes
@@ -44,9 +45,9 @@ instances:
     ## probing randomly picks a broker to probe, so in a mixed-version cluster, probing returns a
     ## non-deterministic result.
     #
-    # kafka_client_api_version: "2.3.0"
+    # kafka_client_api_version: 2.3.0
 
-    ## @param consumer_groups - object - optional
+    ## @param consumer_groups - mapping - optional
     ## Each level is optional. Any empty values are fetched from the Kafka cluster.
     ## You can have empty partitions (example: <CONSUMER_NAME_2>), topics (example: <CONSUMER_NAME_3>),
     ## and even consumer_groups. If you omit consumer_groups, you must set `monitor_unlisted_consumer_groups` to true.
@@ -57,12 +58,16 @@ instances:
     #
     # consumer_groups:
     #   <CONSUMER_NAME_1>:
-    #     <TOPIC_NAME_1>: [0, 1, 4, 12]
+    #     <TOPIC_NAME_1>:
+    #       - 0
+    #       - 1
+    #       - 4
+    #       - 12
     #   <CONSUMER_NAME_2>:
     #     <TOPIC_NAME_2>: []
     #   <CONSUMER_NAME_3>: {}
 
-    ## @param monitor_unlisted_consumer_groups - boolean - optional
+    ## @param monitor_unlisted_consumer_groups - boolean - optional - default: false
     ## Setting monitor_unlisted_consumer_groups to `true` tells the check to discover all consumer groups
     ## and fetch all their known offsets. If this is not set to true, you must specify consumer_groups.
     ##
@@ -77,7 +82,7 @@ instances:
     #
     # monitor_unlisted_consumer_groups: false
 
-    ## @param monitor_all_broker_highwatermarks - boolean - optional
+    ## @param monitor_all_broker_highwatermarks - boolean - optional - default: false
     ## Setting monitor_all_broker_highwatermarks to `true` tells the check to
     ## discover and fetch the broker highwater mark offsets for all kafka topics in
     ## the cluster. Otherwise highwater mark offsets will only be fetched for topic
@@ -86,7 +91,7 @@ instances:
     #
     # monitor_all_broker_highwatermarks: false
 
-    ## @param tags - list of key:value string - optional
+    ## @param tags - list of strings - optional
     ## List of tags to attach to every metric and service check emitted by this integration.
     ##
     ## Learn more about tagging at https://docs.datadoghq.com/tagging
@@ -95,14 +100,14 @@ instances:
     # - <KEY_1>:<VALUE_1>
     # - <KEY_2>:<VALUE_2>
 
-    ## @param security_protocol - string - optional
+    ## @param security_protocol - string - optional - default: PLAINTEXT
     ## Protocol used to communicate with brokers.
     ## Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
     ## Default: PLAINTEXT.
     #
     # security_protocol: PLAINTEXT
 
-    ## @param sasl_mechanism - string - optional
+    ## @param sasl_mechanism - string - optional - default: PLAIN
     ## String picking sasl mechanism when security_protocol is SASL_PLAINTEXT or SASL_SSL.
     ## Valid values are: PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
     #
@@ -123,7 +128,7 @@ instances:
     #
     # sasl_kerberos_service_name: kafka
 
-    ## @param sasl_kerberos_domain_name - string - optional - default: one of the bootstrap servers
+    ## @param sasl_kerberos_domain_name - string - optional - default: localhost
     ## Kerberos domain name to use in GSSAPI sasl mechanism handshake.
     #
     # sasl_kerberos_domain_name: localhost
@@ -134,7 +139,7 @@ instances:
     #
     # ssl_context: <SSL_CONTEXT>
 
-    ## @param ssl_check_hostname - string - optional - default: true
+    ## @param ssl_check_hostname - boolean - optional - default: true
    ## Flag to configure whether SSL handshake should verify that the
     ## certificate matches the broker’s hostname.
     #
@@ -170,17 +175,16 @@ instances:
 
     ## @param broker_requests_batch_size - integer - optional - default: 30
     ## The OffsetRequests sent to each broker are batched by the kafka_consumer check in groups of 30 by default.
-    ## If the batch size is too big, you may see KafkaTimeoutError exceptions in the logs while running the wakeup calls.
+    ## If the batch size is too big, you may see KafkaTimeoutError exceptions in the logs while
+    ## running the wakeup calls.
     ## If the batch size is too small, the check will take longer to run.
     #
     # broker_requests_batch_size: 30
 
-    ## DEPRECATED:
-    ## The following settings are only used when fetching consumer offsets from Zookeeper.
-    ## This functionality is deprecated and will be removed at some point in the future.
-
-    ## @param zk_connect_str - list of objects - required
-    ## Deprecated: Zookeeper endpoints and port to connect to.
+    ## @param zk_connect_str - list of mappings - optional
+    ## DEPRECATION NOTICE: This option is only used for fetching consumer offsets
+    ## from Zookeeper and is deprecated.
+    ## Zookeeper endpoints and port to connect to.
     ## In a production environment, it's often useful to specify multiple
     ## Zookeeper nodes for a single check instance. This way you
     ## only generate a single check process, but if one host goes down,
@@ -188,17 +192,21 @@ instances:
     ## Details: https://github.com/DataDog/dd-agent/issues/2943
     #
     # zk_connect_str:
-    # - localhost:2181
-    # - <HOST>:2181
+    #   - localhost:2181
+    #   - <HOST>:2181
 
     ## @param zk_prefix - string - optional
-    ## Deprecated: Zookeeper chroot prefix under which kafka data is living in zookeeper.
+    ## DEPRECATION NOTICE: This option is only used for fetching consumer offsets
+    ## from Zookeeper and is deprecated.
+    ## Zookeeper chroot prefix under which Kafka data lives in Zookeeper.
    ## If kafka is connecting to `my-zookeeper:2181/kafka` then the `zk_prefix` is `/kafka`.
     #
     # zk_prefix: <ZK_PREFIX>
 
     ## @param kafka_consumer_offsets - boolean - optional - default: false
-    ## Deprecated: This setting only applies if zk_connect_str is set
+    ## DEPRECATION NOTICE: This option is only used for fetching consumer offsets
+    ## from Zookeeper and is deprecated.
+    ## This setting only applies if zk_connect_str is set.
     ## Set to true to fetch consumer offsets from both Zookeeper and Kafka
     ## Set to false to fetch consumer offsets only from Zookeeper.
     #
diff --git a/kafka_consumer/manifest.json b/kafka_consumer/manifest.json
index 21c29d7006f98..0169d3fdd388e 100644
--- a/kafka_consumer/manifest.json
+++ b/kafka_consumer/manifest.json
@@ -24,10 +24,13 @@
   "type": "check",
   "integration_id": "kafka-consumer",
   "assets": {
+    "configuration": {
+      "spec": "assets/configuration/spec.yaml"
+    },
     "monitors": {},
     "dashboards": {},
     "service_checks": "assets/service_checks.json",
     "logs": {},
     "metrics_metadata": "metadata.csv"
   }
-}
+}
\ No newline at end of file
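Note: as a usage illustration of the options documented in [PATCH 1/4], a minimal kafka_consumer.d/conf.yaml could look like the sketch below. The broker addresses, consumer group, topic names, and tag are illustrative placeholders rather than values from the patch; note that `instances` is a YAML list, so the first key of each instance takes a leading dash.

    init_config:
      kafka_timeout: 5                  # seconds; the documented default

    instances:
      - kafka_connect_str:
          - kafka-01.example.com:9092   # placeholder broker addresses
          - kafka-02.example.com:9092
        consumer_groups:
          my_consumer_group:            # placeholder group name
            my_topic: [0, 1, 4, 12]     # monitor only these partitions of my_topic
            other_topic: []             # empty list: fetch all partitions of other_topic
        tags:
          - env:staging                 # placeholder tag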
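Likewise, a hedged sketch of the authentication options specced above, for a cluster that requires SASL over TLS; every value below is a placeholder and the certificate path is an assumption:

    instances:
      - kafka_connect_str:
          - kafka-01.example.com:9093        # placeholder TLS listener address
        security_protocol: SASL_SSL
        sasl_mechanism: SCRAM-SHA-256        # one of the documented valid values
        sasl_plain_username: datadog_client  # placeholder credentials
        sasl_plain_password: s3cr3t
        ssl_check_hostname: true
        ssl_cafile: /etc/ssl/certs/kafka-ca.pem  # placeholder CA bundle path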
From f38dece2d717b95c989b31aead9f6539bd229625 Mon Sep 17 00:00:00 2001
From: gsalami00
Date: Mon, 30 Nov 2020 17:41:47 -0500
Subject: [PATCH 2/4] Update spec.yaml

---
 kafka_consumer/assets/configuration/spec.yaml | 4 +++-
 .../kafka_consumer/data/conf.yaml.example     | 6 +++---
 kafka_consumer/setup.py                       | 2 +-
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/kafka_consumer/assets/configuration/spec.yaml b/kafka_consumer/assets/configuration/spec.yaml
index 3bf951ef4f19a..fc1e910b09f5e 100644
--- a/kafka_consumer/assets/configuration/spec.yaml
+++ b/kafka_consumer/assets/configuration/spec.yaml
@@ -60,6 +60,7 @@ files:
       value:
         type: string
         example: "2.3.0"
+        default: null
     - name: consumer_groups
       description: |
         Each level is optional. Any empty values are fetched from the Kafka cluster.
@@ -131,6 +132,7 @@ files:
       value:
         type: string
         example: PLAIN
+        default: null
     - name: sasl_plain_username
       description: Username for sasl PLAIN or SCRAM authentication.
       value:
@@ -148,10 +150,10 @@ files:
         example: kafka
     - name: sasl_kerberos_domain_name
       description: Kerberos domain name to use in GSSAPI sasl mechanism handshake.
-      default: one of the bootstrap servers
       value:
         type: string
         example: localhost
+        default: null
     - name: ssl_context
       description: |
         Pre-configured SSLContext for wrapping socket connections.
diff --git a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
index 4e193a9fdb2d1..17fe2061aa27b 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
+++ b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
@@ -36,7 +36,7 @@ instances:
       - localhost:9092
       - <HOST>:9092
 
-    ## @param kafka_client_api_version - string - optional - default: 2.3.0
+    ## @param kafka_client_api_version - string - optional
     ## Specify the highest client protocol version supported by all brokers in the cluster.
     ##
     ## This is a performance optimization. If this is not set, then the check automatically probes
@@ -107,7 +107,7 @@ instances:
     #
     # security_protocol: PLAINTEXT
 
-    ## @param sasl_mechanism - string - optional - default: PLAIN
+    ## @param sasl_mechanism - string - optional
     ## String picking sasl mechanism when security_protocol is SASL_PLAINTEXT or SASL_SSL.
     ## Valid values are: PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
     #
@@ -128,7 +128,7 @@ instances:
     #
     # sasl_kerberos_service_name: kafka
 
-    ## @param sasl_kerberos_domain_name - string - optional - default: localhost
+    ## @param sasl_kerberos_domain_name - string - optional
     ## Kerberos domain name to use in GSSAPI sasl mechanism handshake.
     #
     # sasl_kerberos_domain_name: localhost
diff --git a/kafka_consumer/setup.py b/kafka_consumer/setup.py
index 1829bcb1debb9..099ad398604cb 100644
--- a/kafka_consumer/setup.py
+++ b/kafka_consumer/setup.py
@@ -28,7 +28,7 @@ def get_dependencies():
         return f.readlines()
 
 
-CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0'
+CHECKS_BASE_REQ = 'datadog-checks-base>=11.0.0'
 
 setup(
     name='datadog-kafka_consumer',
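Note: the `default: null` lines added in [PATCH 2/4] govern the rendered `@param` header in conf.yaml.example. Judging from the conf.yaml.example hunks in this same patch, the correspondence is roughly the following sketch:

    # In spec.yaml, an option with no meaningful default:
    - name: kafka_client_api_version
      value:
        type: string
        example: "2.3.0"
        default: null

    # Rendered header in conf.yaml.example, with no "- default: ..." suffix:
    ## @param kafka_client_api_version - string - optional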
From 0b488f4c89dd2e62f9fd1ffac6aee04e69008d80 Mon Sep 17 00:00:00 2001
From: gsalami00
Date: Tue, 1 Dec 2020 10:15:27 -0500
Subject: [PATCH 3/4] Update Kafka Consumer spec

---
 kafka_consumer/assets/configuration/spec.yaml | 9 +++------
 .../kafka_consumer/data/conf.yaml.example     | 9 +++------
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/kafka_consumer/assets/configuration/spec.yaml b/kafka_consumer/assets/configuration/spec.yaml
index fc1e910b09f5e..8cdf7c7537fdd 100644
--- a/kafka_consumer/assets/configuration/spec.yaml
+++ b/kafka_consumer/assets/configuration/spec.yaml
@@ -11,8 +11,7 @@ files:
       DEPRECATION NOTICE: In the early days of Kafka, consumer offsets were stored in Zookeeper.
       So this check currently supports fetching consumer offsets from both Kafka and Zookeeper.
       However, Kafka 0.9 (released in 2015) deprecated Zookeeper storage. As a result, we have also
-      deprecated fetching consumer offsets from Zookeeper and at some point in the future that
-      functionality will be removed from this check.
+      deprecated fetching consumer offsets from Zookeeper.
     options:
     - name: kafka_timeout
@@ -68,8 +67,7 @@ files:
         and even consumer_groups. If you omit consumer_groups, you must set `monitor_unlisted_consumer_groups` to true.
 
         Deprecation notice: Omitting various levels works for zookeeper-based consumers. However, all
-        functionality related to fetching offsets from Zookeeper is deprecated and will be removed from this
-        check in the future.
+        functionality related to fetching offsets from Zookeeper is deprecated.
       value:
         type: object
         example:
           <CONSUMER_NAME_1>:
             <TOPIC_NAME_1>: [0, 1, 4, 12]
@@ -89,8 +87,7 @@ files:
         must be specified. This requirement only applies to the brokers, not the consumers--they can be any version.
 
         Deprecation notice: This feature works for Zookeeper-based consumers. However, all
-        functionality related to fetching offsets from Zookeeper is deprecated and will be removed from this
-        check in the future.
+        functionality related to fetching offsets from Zookeeper is deprecated.
       value:
         type: boolean
         example: false
diff --git a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
index 17fe2061aa27b..8495f4e83d249 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
+++ b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
@@ -5,8 +5,7 @@
 ## DEPRECATION NOTICE: In the early days of Kafka, consumer offsets were stored in Zookeeper.
 ## So this check currently supports fetching consumer offsets from both Kafka and Zookeeper.
 ## However, Kafka 0.9 (released in 2015) deprecated Zookeeper storage. As a result, we have also
-## deprecated fetching consumer offsets from Zookeeper and at some point in the future that
-## functionality will be removed from this check.
+## deprecated fetching consumer offsets from Zookeeper.
 #
 init_config:
 
@@ -53,8 +52,7 @@ instances:
     ## and even consumer_groups. If you omit consumer_groups, you must set `monitor_unlisted_consumer_groups` to true.
     ##
     ## Deprecation notice: Omitting various levels works for zookeeper-based consumers. However, all
-    ## functionality related to fetching offsets from Zookeeper is deprecated and will be removed from this
-    ## check in the future.
+    ## functionality related to fetching offsets from Zookeeper is deprecated.
     #
     # consumer_groups:
     #   <CONSUMER_NAME_1>:
@@ -77,8 +75,7 @@ instances:
     ## must be specified. This requirement only applies to the brokers, not the consumers--they can be any version.
     ##
     ## Deprecation notice: This feature works for Zookeeper-based consumers. However, all
-    ## functionality related to fetching offsets from Zookeeper is deprecated and will be removed from this
-    ## check in the future.
+    ## functionality related to fetching offsets from Zookeeper is deprecated.
     #
     # monitor_unlisted_consumer_groups: false
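Note: for the discovery path that [PATCH 3/4] repeatedly references, omitting `consumer_groups` entirely, a hedged instance sketch might be (the broker address is a placeholder; the version and partition-cap caveats are the documented ones):

    instances:
      - kafka_connect_str:
          - kafka-01.example.com:9092   # placeholder broker address
        # No consumer_groups block, so discovery must be enabled explicitly.
        # Requires brokers >= 0.10.2; collection is capped at 500 partitions.
        monitor_unlisted_consumer_groups: true
        monitor_all_broker_highwatermarks: true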
From ffe7c09d25ef890630dfda843f63b513d736b321 Mon Sep 17 00:00:00 2001
From: gsalami00
Date: Tue, 1 Dec 2020 13:02:25 -0500
Subject: [PATCH 4/4] Update Kafka spec

---
 kafka_consumer/assets/configuration/spec.yaml | 26 +++++++++----------
 .../kafka_consumer/data/conf.yaml.example     |  6 ++++--
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/kafka_consumer/assets/configuration/spec.yaml b/kafka_consumer/assets/configuration/spec.yaml
index 8cdf7c7537fdd..e84a6ebdadcd7 100644
--- a/kafka_consumer/assets/configuration/spec.yaml
+++ b/kafka_consumer/assets/configuration/spec.yaml
@@ -2,16 +2,19 @@ name: Kafka Consumer
 files:
 - name: kafka_consumer.yaml
   options:
-  - name: init_config
-    description: |
-      WARNING: To avoid blindly collecting offsets and lag for an unbounded number
-      of partitions (as could be the case after enabling monitor_unlisted_consumer_groups
-      or monitor_all_broker_highwatermarks) the check collects metrics for at most 500 partitions.
-
-      DEPRECATION NOTICE: In the early days of Kafka, consumer offsets were stored in Zookeeper.
-      So this check currently supports fetching consumer offsets from both Kafka and Zookeeper.
-      However, Kafka 0.9 (released in 2015) deprecated Zookeeper storage. As a result, we have also
-      deprecated fetching consumer offsets from Zookeeper.
+  - template: init_config
+    overrides:
+      description: |
+        All options defined here are available to all instances.
+
+        WARNING: To avoid blindly collecting offsets and lag for an unbounded number
+        of partitions (as could be the case after enabling monitor_unlisted_consumer_groups
+        or monitor_all_broker_highwatermarks) the check collects metrics for at most 500 partitions.
+
+        DEPRECATION NOTICE: In the early days of Kafka, consumer offsets were stored in Zookeeper.
+        So this check currently supports fetching consumer offsets from both Kafka and Zookeeper.
+        However, Kafka 0.9 (released in 2015) deprecated Zookeeper storage. As a result, we have also
+        deprecated fetching consumer offsets from Zookeeper.
     options:
     - name: kafka_timeout
@@ -26,9 +29,7 @@ files:
       value:
         type: integer
         example: 5
         default: 5
-  - name: instances
-    description: |
-
+  - template: instances
     options:
     - name: kafka_connect_str
      description: |
        Kafka endpoints and port to connect to.
@@ -46,7 +47,6 @@ files:
         type: string
         example:
           - localhost:9092
-          - <HOST>:9092
     - name: kafka_client_api_version
       description: |
         Specify the highest client protocol version supported by all brokers in the cluster.
diff --git a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
index 8495f4e83d249..81bfe11a12401 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
+++ b/kafka_consumer/datadog_checks/kafka_consumer/data/conf.yaml.example
@@ -1,3 +1,5 @@
+## All options defined here are available to all instances.
+##
 ## WARNING: To avoid blindly collecting offsets and lag for an unbounded number
 ## of partitions (as could be the case after enabling monitor_unlisted_consumer_groups
 ## or monitor_all_broker_highwatermarks) the check collects metrics for at most 500 partitions.
@@ -19,6 +21,7 @@ init_config:
     #
     # zk_timeout: 5
 
+## Every instance is scheduled independent of the others.
 #
 instances:
 
@@ -31,9 +34,8 @@ instances:
     ## KafkaClient tries contacting the next host.
     ## Details: https://github.com/DataDog/dd-agent/issues/2943
     #
-  kafka_connect_str:
+  - kafka_connect_str:
       - localhost:9092
-      - <HOST>:9092
 
     ## @param kafka_client_api_version - string - optional
     ## Specify the highest client protocol version supported by all brokers in the cluster.
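Note: the `template:` keys adopted in [PATCH 4/4] pull in the shared init_config and instances blocks and merge the check-specific options into them. As a rough sketch of the pattern (the options shown are this check's; the override field is the one this patch uses):

    options:
    - template: init_config        # shared block, with its description overridden
      overrides:
        description: |
          All options defined here are available to all instances.
      options:
      - name: kafka_timeout        # check-specific init_config option
        value:
          type: integer
          example: 5
          default: 5
    - template: instances          # shared block, used as-is
      options:
      - name: kafka_connect_str    # check-specific instance options follow
        required: True
        value:
          type: array
          items:
            type: string
          example:
            - localhost:9092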