From 81655e65585a3e899870be878eec3d82a9ef353b Mon Sep 17 00:00:00 2001 From: Jaime Fullaondo Date: Fri, 4 Aug 2017 17:50:54 +0200 Subject: [PATCH] [kafka_consumer] adding new configuration elements. [kafka_consumer] fix cops. --- kafka_consumer/ci/kafka_consumer.rake | 3 +-- kafka_consumer/conf.yaml.example | 10 ++++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/kafka_consumer/ci/kafka_consumer.rake b/kafka_consumer/ci/kafka_consumer.rake index d7b107d9c341b1..6dbab268b5f4df 100644 --- a/kafka_consumer/ci/kafka_consumer.rake +++ b/kafka_consumer/ci/kafka_consumer.rake @@ -32,7 +32,7 @@ namespace :ci do Wait.for 9092 wait_on_docker_logs('resources_kafka_1', 20, ' started (kafka.server.KafkaServer)') wait_on_docker_logs('resources_zookeeper_1', 20, 'NoNode for /brokers') - if Gem::Version::new(kafka_consumer_version) > Gem::Version::new(kafka_legacy) + if Gem::Version.new(kafka_consumer_version) > Gem::Version.new(kafka_legacy) wait_on_docker_logs('resources_kafka_1', 20, 'Created topic "marvel"') wait_on_docker_logs('resources_kafka_1', 20, 'Created topic "dc"') end @@ -42,7 +42,6 @@ namespace :ci do docker-compose -f #{ENV['TRAVIS_BUILD_DIR']}/kafka/ci/resources/docker-compose-single-broker.yml scale kafka=2) wait_on_docker_logs('resources_kafka_2', 20, ' started (kafka.server.KafkaServer)') - end task before_script: ['ci:common:before_script'] do diff --git a/kafka_consumer/conf.yaml.example b/kafka_consumer/conf.yaml.example index 89d5b1937a9dc2..bc4a57a528a428 100644 --- a/kafka_consumer/conf.yaml.example +++ b/kafka_consumer/conf.yaml.example @@ -3,6 +3,8 @@ init_config: # zk_timeout: 5 # Customize the Kafka connection timeout here # kafka_timeout: 5 + # Customize max number of retries per failed query to Kafka + # kafka_retries: 3 # Customize the number of seconds that must elapse between running this check. 
# When checking Kafka offsets stored in Zookeeper, a single run of this check # must stat zookeeper more than the number of consumers * topic_partitions @@ -11,12 +13,19 @@ init_config: # https://help.datadoghq.com/hc/en-us/articles/203557899-How-do-I-change-the-frequency-of-an-agent-check- # min_collection_interval: 600 + instances: # In a production environment, it's often useful to specify multiple # Kafka / Zookeper nodes for a single check instance. This way you # only generate a single check process, but if one host goes down, # KafkaClient / KazooClient will try contacting the next host. # Details: https://github.com/DataDog/dd-agent/issues/2943 + # + # If you wish to only collect consumer offsets from Kafka, because + # you're using the new-style consumers, you can comment out all + # zk_* configuration elements below. + # Please note that unlisted consumer groups are not supported at + # the moment when zookeeper consumer offset collection is disabled. - kafka_connect_str: - localhost:9092 - another_kafka_broker:9092 @@ -24,6 +33,7 @@ instances: - localhost:2181 - another_zookeeper:2181 # zk_prefix: /0.8 + # kafka_consumer_offsets: false consumer_groups: my_consumer: # consumer group name my_topic: [0, 1, 4, 12] # topic_name: list of partitions