From d3b0dbcfca7acc8143be806ff9739947fdbe2487 Mon Sep 17 00:00:00 2001 From: Steve Hu Date: Mon, 23 Aug 2021 13:20:11 -0400 Subject: [PATCH] fixes #82 update the kafka-producer.yml and kafka-consumer.yml based on the kafka-sidecar --- kafka-common/src/test/resources/config/kafka-consumer.yml | 2 +- kafka-common/src/test/resources/config/kafka-producer.yml | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/kafka-common/src/test/resources/config/kafka-consumer.yml b/kafka-common/src/test/resources/config/kafka-consumer.yml index 67e63d6..bec6ee2 100644 --- a/kafka-common/src/test/resources/config/kafka-consumer.yml +++ b/kafka-common/src/test/resources/config/kafka-consumer.yml @@ -26,7 +26,7 @@ properties: # basic.auth.credentials.source: ${kafka-consumer.basic.auth.credentials.source:USER_INFO} # max fetch size from Kafka cluster. Default 50mb is too big for cache consumption on the sidecar fetch.max.bytes: ${kafka-consumer.fetch.max.bytes:102400} - # max pol records default is 500. Adjust it based on the size of the records to make sure each poll + # max poll records default is 500. Adjust it based on the size of the records to make sure each poll # is similar to requestMaxBytes down below. max.poll.records: ${kafka-consumer.max.poll.records:100} # The maximum amount of data per-partition the server will return. Records are fetched in batches by the consumer. 
diff --git a/kafka-common/src/test/resources/config/kafka-producer.yml b/kafka-common/src/test/resources/config/kafka-producer.yml index 9fcdb4e..00c2540 100644 --- a/kafka-common/src/test/resources/config/kafka-producer.yml +++ b/kafka-common/src/test/resources/config/kafka-producer.yml @@ -39,13 +39,15 @@ properties: # basic authentication user:pass for the schema registry # basic.auth.user.info: ${kafka-producer.username:username}:${kafka-producer.password:password} # basic.auth.credentials.source: ${kafka-producer.basic.auth.credentials.source:USER_INFO} + # If you have a message that is bigger than 1 MB to produce, increase this value. + max.message.size: ${kafka-producer.max.message.size:1048576} # The default topic for the producer. Only certain producer implementation will use it. topic: ${kafka-producer.topic:portal-event} # Default key format if no schema for the topic key -keyFormat: string +keyFormat: ${kafka-producer.keyFormat:jsonschema} # Default value format if no schema for the topic value -valueFormat: string +valueFormat: ${kafka-producer.valueFormat:jsonschema} # If open tracing is enable. traceability, correlation and metrics should not be in the chain if opentracing is used. injectOpenTracing: ${kafka-producer.injectOpenTracing:false} # Inject serviceId as callerId into the http header for metrics to collect the caller. The serviceId is from server.yml @@ -53,6 +55,6 @@ injectCallerId: ${kafka-producer.injectCallerId:false} # Indicator if the audit is enabled. auditEnabled: ${kafka-producer.auditEnabled:true} -# Audit log destination topic or logfile. Default to topic -auditTarget: ${kafka-producer.auditTarget:topic} +# Audit log destination topic or logfile. Default to logfile +auditTarget: ${kafka-producer.auditTarget:logfile} # The consumer audit topic name if the auditTarget is topic auditTopic: ${kafka-producer.auditTopic:sidecar-audit}