Upgrade librdkafka to 1.6.1 and prepare release 0.11.7 #1228

Merged: 5 commits, Sep 28, 2021
3 changes: 2 additions & 1 deletion .github/workflows/tests.yaml
@@ -45,7 +45,8 @@ jobs:
PROPTEST_CASES: 2500
RUSTFLAGS: -D warnings -C target-feature=+avx,+avx2,+sse4.2
with:
version: "0.18.0-alpha3"
version: "0.18.2"
timeout: "120s"
args: " --avoid-cfg-tarpaulin --exclude-files target* tremor-cli tremor-api **/errors.rs --out Lcov --all"
- name: Coveralls
uses: coverallsapp/github-action@master
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,10 @@
# Changelog

## 0.11.7

### Fixes

- Upgrade dependency on librdkafka to 1.6.1 [#1228](https://github.com/tremor-rs/tremor-runtime/pull/1228).
## 0.11.6
### Fixes

21 changes: 11 additions & 10 deletions Cargo.lock

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions Cargo.toml
@@ -9,7 +9,7 @@ license = "Apache-2.0"
name = "tremor-runtime"
readme = "README.md"
repository = "https://github.com/tremor-rs/tremor-runtime"
version = "0.11.6"
version = "0.11.7"

[workspace]

@@ -103,11 +103,11 @@ postgres-protocol = "0.6"
tokio-postgres = "0.7"

# kafka. cmake is the encouraged way to build this and also the one that works on windows/with musl.
rdkafka = { version = "0.24", features = [
rdkafka = { version = "0.26", features = [
"cmake-build",
"libz-static",
], default-features = false }
rdkafka-sys = { version = "2.0.0", features = [
rdkafka-sys = { version = "4.0.0", features = [
"cmake-build",
"libz-static",
] } # tracking the version rdkafka depends on
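For context (not part of the diff): rdkafka 0.26 pulls in rdkafka-sys 4.0, which bundles the librdkafka 1.6.x sources when built with `cmake-build`. A small, hedged way to confirm which librdkafka actually ends up linked after the upgrade is to print the runtime version:

```rust
// Hypothetical helper, not part of this PR: print which librdkafka the
// statically linked rdkafka-sys build actually bundles.
use rdkafka::util::get_rdkafka_version;

fn main() {
    let (version_hex, version_str) = get_rdkafka_version();
    // With rdkafka 0.26 / rdkafka-sys 4.0 this is expected to report 1.6.x.
    println!("librdkafka {:#x} ({})", version_hex, version_str);
}
```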
2 changes: 1 addition & 1 deletion Dockerfile.learn
@@ -2,7 +2,7 @@ FROM rust:1.52.1 as builder

RUN cargo install --features=ssl websocat

FROM tremorproject/tremor:0.11.6
FROM tremorproject/tremor:0.11.7

COPY --from=builder /usr/local/cargo/bin/websocat /usr/local/bin/websocat

32 changes: 14 additions & 18 deletions src/connectors/pb.rs
@@ -119,8 +119,6 @@ pub(crate) fn u64_repeated_to_pb(json: Option<&Value<'_>>) -> Result<Vec<u64>> {

#[cfg(test)]
mod test {
use std::{f64, i32};

use super::*;

use proptest::proptest;
@@ -158,15 +156,15 @@ mod test {
}

prop_compose! {
fn fveci()(vec in prop::collection::vec(f64::MIN..f64::MAX, 1..3))
fn fveci()(vec in prop::collection::vec(prop::num::f64::POSITIVE | prop::num::f64::NEGATIVE, 1..3))
(index in 0..vec.len(), vec in Just(vec))
-> (Vec<f64>, usize) {
(vec, index)
}
}

prop_compose! {
fn uveci()(vec in prop::collection::vec(u64::MIN..u64::MAX, 1..3))
fn uveci()(vec in prop::collection::vec(prop::num::u64::ANY, 1..3))
(index in 0..vec.len(), vec in Just(vec))
-> (Vec<u64>, usize) {
(vec, index)
@@ -188,7 +186,7 @@

#[test]
fn prop_pb_u64(
arb_ints in prop::collection::vec(u64::MIN..u64::MAX, 0..100),
arb_ints in prop::collection::vec(prop::num::u64::ANY, 0..100),
) {
for expected in arb_ints {
let json = Value::Static(StaticNode::U64(expected));
@@ -200,7 +198,7 @@

#[test]
fn prop_pb_i64(
arb_ints in prop::collection::vec(i64::MIN..i64::MAX, 0..100),
arb_ints in prop::collection::vec(prop::num::i64::ANY, 0..100),
) {
for expected in arb_ints {
let json = Value::Static(StaticNode::I64(expected));
@@ -212,7 +210,7 @@

#[test]
fn prop_pb_u32(
arb_ints in prop::collection::vec(u32::MIN..u32::MAX, 0..100),
arb_ints in prop::collection::vec(prop::num::u32::ANY, 0..100),
) {
for expected in arb_ints {
let json = Value::Static(StaticNode::U64(expected as u64));
@@ -223,7 +221,7 @@

#[test]
fn prop_pb_i32(
arb_ints in prop::collection::vec(i32::MIN..i32::MAX, 0..100),
arb_ints in prop::collection::vec(prop::num::i32::ANY, 0..100),
) {
for expected in arb_ints {
let json = Value::Static(StaticNode::I64(expected as i64));
@@ -234,21 +232,19 @@

#[test]
fn prop_pb_f64(
arb_ints in prop::collection::vec(f64::MIN..f64::MAX, 0..100),
arb_int in prop::num::f64::POSITIVE | prop::num::f64::NEGATIVE,
) {
for expected in arb_ints {
let json = Value::Static(StaticNode::F64(expected as f64));
let pb = maybe_double_to_pb(Some(&json))?;
prop_assert_eq!(expected, pb);
}
let json = Value::from(arb_int);
let pb = maybe_double_to_pb(Some(&json))?;
prop_assert_eq!(arb_int, pb);
}

#[test]
fn prop_pb_f64_repeated((vec, _index) in fveci()) {
let json: Value = literal!(vec.clone());
let pb = f64_repeated_to_pb(Some(&json))?;
prop_assert_eq!(&vec, &pb);
prop_assert_eq!(pb.len(), vec.len());
let json: Value = literal!(vec.clone());
let pb = f64_repeated_to_pb(Some(&json))?;
prop_assert_eq!(&vec, &pb);
prop_assert_eq!(pb.len(), vec.len());
}

#[test]
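The test changes above move from half-open range strategies (`f64::MIN..f64::MAX`, `u64::MIN..u64::MAX`) to proptest's numeric constant strategies, which cover both signs and the full integer range without producing NaN by default. A minimal standalone sketch of that style (assuming proptest 1.x; the test bodies are placeholders, not the PR's assertions):

```rust
use proptest::prelude::*;

proptest! {
    // Floats of either sign; with no class flags given, proptest defaults to
    // finite values (normal, subnormal, zero), so equality assertions hold.
    #[test]
    fn doubles_of_both_signs(x in prop::num::f64::POSITIVE | prop::num::f64::NEGATIVE) {
        prop_assert_eq!(x, x);
    }

    // `ANY` spans the full u64 range, including u64::MAX, which the old
    // half-open range `u64::MIN..u64::MAX` could never produce.
    #[test]
    fn unsigned_ints(xs in prop::collection::vec(prop::num::u64::ANY, 0..100)) {
        for x in xs {
            prop_assert!(x <= u64::MAX);
        }
    }
}
```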
79 changes: 24 additions & 55 deletions src/sink/kafka.rs
@@ -27,9 +27,9 @@ use async_channel::{bounded, Receiver, Sender};
use halfbrown::HashMap;
use rdkafka::config::ClientConfig;
use rdkafka::{
error::KafkaError,
error::{KafkaError, RDKafkaError},
message::OwnedHeaders,
producer::{FutureProducer, FutureRecord},
producer::{FutureProducer, FutureRecord, Producer},
};
use std::{
fmt,
@@ -94,8 +94,8 @@ pub struct Kafka {
producer: FutureProducer,
postprocessors: Postprocessors,
reply_tx: Sender<sink::Reply>,
error_rx: Receiver<KafkaError>,
error_tx: Sender<KafkaError>,
error_rx: Receiver<RDKafkaError>,
error_tx: Sender<RDKafkaError>,
}

impl fmt::Debug for Kafka {
@@ -129,40 +129,6 @@ impl offramp::Impl for Kafka {
}
}

fn is_fatal(e: &KafkaError) -> bool {
matches!(
e,
KafkaError::AdminOp(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::ConsumerCommit(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::Global(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::GroupListFetch(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::MessageConsumption(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::MessageProduction(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::MetadataFetch(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::OffsetFetch(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::SetPartitionOffset(rdkafka::error::RDKafkaError::Fatal)
| KafkaError::StoreOffset(rdkafka::error::RDKafkaError::Fatal)
)
}

unsafe fn get_fatal_error<C>(
client: &rdkafka::client::Client<C>,
) -> Option<(rdkafka::types::RDKafkaRespErr, String)>
where
C: rdkafka::ClientContext,
{
const LEN: usize = 4096;
let mut buf: [i8; LEN] = std::mem::MaybeUninit::uninit().assume_init();
let client_ptr = client.native_ptr();

let code = rdkafka_sys::bindings::rd_kafka_fatal_error(client_ptr, buf.as_mut_ptr(), LEN);
if code == rdkafka::types::RDKafkaRespErr::RD_KAFKA_RESP_ERR_NO_ERROR {
None
} else {
Some((code, rdkafka::util::cstr_to_owned(buf.as_ptr())))
}
}

/// Waits for actual delivery to kafka cluster and sends ack or fail.
/// Also sends fatal errors for handling in offramp task.
#[allow(clippy::cast_possible_truncation)]
@@ -172,18 +138,20 @@ async fn wait_for_delivery(
processing_start: Instant,
maybe_event: Option<Event>,
reply_tx: Sender<sink::Reply>,
error_tx: Sender<KafkaError>,
error_tx: Sender<RDKafkaError>,
) -> Result<()> {
let cb = match futures::future::try_join_all(futures).await {
Ok(results) => {
if let Some((kafka_error, _)) = results.into_iter().find_map(std::result::Result::err) {
if let Some((KafkaError::Transaction(rd_error), _)) =
results.into_iter().find_map(std::result::Result::err)
{
error!(
"[Sink::{}] Error delivering kafka record: {}",
sink_url, &kafka_error
sink_url, &rd_error
);
if is_fatal(&kafka_error) {
let err_msg = format!("{}", &kafka_error);
if error_tx.send(kafka_error).await.is_err() {
if rd_error.is_fatal() {
let err_msg = format!("{}", &rd_error);
if error_tx.send(rd_error).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about kafka error: {}",
&sink_url, &err_msg
@@ -237,14 +205,13 @@ impl Kafka {
Ok(())
}

fn handle_fatal_error(&mut self, _fatal_error: &KafkaError) -> Result<()> {
let maybe_fatal_error = unsafe { get_fatal_error(self.producer.client()) };
if let Some(fatal_error) = maybe_fatal_error {
error!(
"[Sink::{}] Fatal Error({:?}): {}",
&self.sink_url, fatal_error.0, fatal_error.1
);
}
fn handle_fatal_error(&mut self, fatal_error: &RDKafkaError) -> Result<()> {
error!(
"[Sink::{}] Fatal Error({:?}): {}",
&self.sink_url,
fatal_error.code(),
fatal_error.string()
);
error!("[Sink::{}] Reinitiating client...", &self.sink_url);
self.producer = self.config.producer()?;
error!("[Sink::{}] Client reinitiated.", &self.sink_url);
@@ -310,9 +277,11 @@ impl Sink for Kafka {
"[Sink::{}] failed to enqueue message: {}",
&self.sink_url, e
);
if is_fatal(&e) {
// handle fatal errors right here, without enqueueing
self.handle_fatal_error(&e)?;
if let KafkaError::Transaction(e) = e {
if e.is_fatal() {
// handle fatal errors right here, without enqueueing
self.handle_fatal_error(&e)?;
}
}
// bail out with a CB fail on enqueue error
if event.transactional {
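The sink changes above ride on rdkafka 0.26's reworked error types: the old `RDKafkaError` enum became `RDKafkaErrorCode`, and `RDKafkaError` is now a struct wrapping the native error object with `is_fatal()`, `code()` and `string()` accessors, surfaced through the `KafkaError::Transaction` variant. A rough standalone sketch of that pattern (the broker address and log messages are placeholders, not the sink's actual code):

```rust
use rdkafka::config::ClientConfig;
use rdkafka::error::{KafkaError, RDKafkaError};
use rdkafka::producer::FutureProducer;

// Mirrors the shape of `handle_fatal_error` above: the error struct exposes
// code and message directly, so no unsafe FFI lookup is needed anymore.
fn log_fatal(err: &RDKafkaError) {
    eprintln!("fatal kafka error ({:?}): {}", err.code(), err.string());
}

fn classify(err: KafkaError) {
    match err {
        // Transactional/idempotent producer failures carry the rich error object.
        KafkaError::Transaction(rd_err) if rd_err.is_fatal() => log_fatal(&rd_err),
        other => eprintln!("non-fatal kafka error: {}", other),
    }
}

// After a fatal error the sink recreates its producer; a minimal equivalent:
fn make_producer() -> Result<FutureProducer, KafkaError> {
    ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092") // placeholder broker
        .create()
}

fn main() {
    match make_producer() {
        Ok(_producer) => println!("producer created"),
        Err(e) => classify(e),
    }
}
```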