Skip to content

Commit

Permalink
[kafka_consumer] adding new configuration elements.
Browse files Browse the repository at this point in the history
[kafka_consumer] fix cops.
  • Loading branch information
truthbk committed Aug 4, 2017
1 parent 7e7221f commit 81655e6
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 2 deletions.
3 changes: 1 addition & 2 deletions kafka_consumer/ci/kafka_consumer.rake
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ namespace :ci do
Wait.for 9092
wait_on_docker_logs('resources_kafka_1', 20, ' started (kafka.server.KafkaServer)')
wait_on_docker_logs('resources_zookeeper_1', 20, 'NoNode for /brokers')
if Gem::Version::new(kafka_consumer_version) > Gem::Version::new(kafka_legacy)
if Gem::Version.new(kafka_consumer_version) > Gem::Version.new(kafka_legacy)
wait_on_docker_logs('resources_kafka_1', 20, 'Created topic "marvel"')
wait_on_docker_logs('resources_kafka_1', 20, 'Created topic "dc"')
end
Expand All @@ -42,7 +42,6 @@ namespace :ci do
docker-compose -f #{ENV['TRAVIS_BUILD_DIR']}/kafka/ci/resources/docker-compose-single-broker.yml scale kafka=2)
wait_on_docker_logs('resources_kafka_2', 20, ' started (kafka.server.KafkaServer)')


end

task before_script: ['ci:common:before_script'] do
Expand Down
10 changes: 10 additions & 0 deletions kafka_consumer/conf.yaml.example
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ init_config:
# zk_timeout: 5
# Customize the Kafka connection timeout here
# kafka_timeout: 5
# Customize max number of retries per failed query to Kafka
# kafka_retries: 3
# Customize the number of seconds that must elapse between runs of this check.
# When checking Kafka offsets stored in Zookeeper, a single run of this check
# must stat zookeeper more than the number of consumers * topic_partitions
Expand All @@ -11,19 +13,27 @@ init_config:
# https://help.datadoghq.com/hc/en-us/articles/203557899-How-do-I-change-the-frequency-of-an-agent-check-
# min_collection_interval: 600


instances:
# In a production environment, it's often useful to specify multiple
# Kafka / Zookeeper nodes for a single check instance. This way you
# only generate a single check process, but if one host goes down,
# KafkaClient / KazooClient will try contacting the next host.
# Details: https://github.com/DataDog/dd-agent/issues/2943
#
# If you wish to collect consumer offsets only from Kafka, because
# you're using the new-style consumers, you can comment out all
# zk_* configuration elements below.
# Please note that unlisted consumer groups are not supported at
# the moment when zookeeper consumer offset collection is disabled.
- kafka_connect_str:
- localhost:9092
- another_kafka_broker:9092
zk_connect_str:
- localhost:2181
- another_zookeeper:2181
# zk_prefix: /0.8
# kafka_consumer_offsets: false
consumer_groups:
my_consumer: # consumer group name
my_topic: [0, 1, 4, 12] # topic_name: list of partitions
Expand Down

0 comments on commit 81655e6

Please sign in to comment.