From fe9b603bcee946dc3777541f659809927bfc696f Mon Sep 17 00:00:00 2001
From: barshaul
Date: Sun, 20 Oct 2024 10:25:36 +0000
Subject: [PATCH] =?UTF-8?q?Avoid=20retrying=20on=20IO=20errors=20when=20it?=
 =?UTF-8?q?=E2=80=99s=20unclear=20if=20the=20server=20received=20the=20req?=
 =?UTF-8?q?uest?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: barshaul
---
 .../redis-rs/redis/src/aio/connection.rs      |   2 +-
 .../redis/src/aio/multiplexed_connection.rs   |  36 +--
 glide-core/redis-rs/redis/src/cluster.rs      |   3 +-
 .../cluster_async/connections_container.rs    |  35 +--
 .../redis-rs/redis/src/cluster_async/mod.rs   |  85 +++++--
 glide-core/redis-rs/redis/src/types.rs        |   7 +
 glide-core/redis-rs/redis/tests/test_async.rs |   2 +-
 .../redis/tests/test_cluster_async.rs         | 220 ++++++++++++++----
 8 files changed, 285 insertions(+), 105 deletions(-)

diff --git a/glide-core/redis-rs/redis/src/aio/connection.rs b/glide-core/redis-rs/redis/src/aio/connection.rs
index 5adef7869f..2b32a7ced3 100644
--- a/glide-core/redis-rs/redis/src/aio/connection.rs
+++ b/glide-core/redis-rs/redis/src/aio/connection.rs
@@ -7,7 +7,7 @@ use crate::connection::{
     resp2_is_pub_sub_state_cleared, resp3_is_pub_sub_state_cleared, ConnectionAddr, ConnectionInfo,
     Msg, RedisConnectionInfo,
 };
-#[cfg(any(feature = "tokio-comp"))]
+#[cfg(feature = "tokio-comp")]
 use crate::parser::ValueCodec;
 use crate::types::{ErrorKind, FromRedisValue, RedisError, RedisFuture, RedisResult, Value};
 use crate::{from_owned_redis_value, ProtocolVersion, ToRedisArgs};
diff --git a/glide-core/redis-rs/redis/src/aio/multiplexed_connection.rs b/glide-core/redis-rs/redis/src/aio/multiplexed_connection.rs
index fb1b62f8a1..0597779ed5 100644
--- a/glide-core/redis-rs/redis/src/aio/multiplexed_connection.rs
+++ b/glide-core/redis-rs/redis/src/aio/multiplexed_connection.rs
@@ -349,7 +349,7 @@ where
         &mut self,
         item: SinkItem,
         timeout: Duration,
-    ) -> Result<Value, Option<RedisError>> {
+    ) -> Result<Value, RedisError> {
         self.send_recv(item, None, timeout).await
     }
 
@@ -359,7 +359,7 @@ where
         // If `None`, this is a single request, not a pipeline of multiple requests.
         pipeline_response_count: Option<usize>,
         timeout: Duration,
-    ) -> Result<Value, Option<RedisError>> {
+    ) -> Result<Value, RedisError> {
         let (sender, receiver) = oneshot::channel();
 
         self.sender
@@ -369,15 +369,27 @@ where
                 output: sender,
             })
             .await
-            .map_err(|_| None)?;
+            .map_err(|err| {
+                // If an error occurs here, it means the request never reached the server, as guaranteed
+                // by the 'send' function. Since the server did not receive the data, it is safe to retry
+                // the request.
+                RedisError::from((
+                    crate::ErrorKind::IoErrorRetrySafe,
+                    "Failed to send the request to the server",
+                    format!("{err}"),
+                ))
+            })?;
 
         match Runtime::locate().timeout(timeout, receiver).await {
-            Ok(Ok(result)) => result.map_err(Some),
+            Ok(Ok(result)) => result,
             Ok(Err(_)) => {
                 // The `sender` was dropped which likely means that the stream part
-                // failed for one reason or another
-                Err(None)
+                // failed for one reason or another.
+                // Since we don't know if the server received the request, retrying it isn't safe.
+                // For example, retrying an INCR request could result in double increments.
+                // Hence, we return an IoError instead of an IoErrorRetrySafe.
+                Err(RedisError::from(io::Error::from(io::ErrorKind::BrokenPipe)))
             }
-            Err(elapsed) => Err(Some(elapsed.into())),
+            Err(elapsed) => Err(elapsed.into()),
         }
     }
@@ -503,10 +515,7 @@ impl MultiplexedConnection {
         let result = self
             .pipeline
             .send_single(cmd.get_packed_command(), self.response_timeout)
-            .await
-            .map_err(|err| {
-                err.unwrap_or_else(|| RedisError::from(io::Error::from(io::ErrorKind::BrokenPipe)))
-            });
+            .await;
         if self.protocol != ProtocolVersion::RESP2 {
             if let Err(e) = &result {
                 if e.is_connection_dropped() {
@@ -537,10 +546,7 @@ impl MultiplexedConnection {
                 Some(offset + count),
                 self.response_timeout,
             )
-            .await
-            .map_err(|err| {
-                err.unwrap_or_else(|| RedisError::from(io::Error::from(io::ErrorKind::BrokenPipe)))
-            });
+            .await;
 
         if self.protocol != ProtocolVersion::RESP2 {
             if let Err(e) = &result {
diff --git a/glide-core/redis-rs/redis/src/cluster.rs b/glide-core/redis-rs/redis/src/cluster.rs
index f9c76f5161..f31c8cf9ed 100644
--- a/glide-core/redis-rs/redis/src/cluster.rs
+++ b/glide-core/redis-rs/redis/src/cluster.rs
@@ -771,7 +771,8 @@ where
                     .wait_time_for_retry(retries);
                 thread::sleep(sleep_time);
             }
-            crate::types::RetryMethod::Reconnect => {
+            crate::types::RetryMethod::Reconnect
+            | crate::types::RetryMethod::ReconnectAndRetry => {
                 if *self.auto_reconnect.borrow() {
                     if let Ok(mut conn) = self.connect(&addr) {
                         if conn.check_connection() {
diff --git a/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs b/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs
index 2bfbb8b934..d89d063b78 100644
--- a/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs
+++ b/glide-core/redis-rs/redis/src/cluster_async/connections_container.rs
@@ -255,16 +255,18 @@ where
         &self,
         amount: usize,
         conn_type: ConnectionType,
-    ) -> impl Iterator<Item = ConnectionAndAddress<Connection>> + '_ {
-        self.connection_map
-            .iter()
-            .choose_multiple(&mut rand::thread_rng(), amount)
-            .into_iter()
-            .map(move |item| {
-                let (address, node) = (item.key(), item.value());
-                let conn = node.get_connection(&conn_type);
-                (address.clone(), conn)
-            })
+    ) -> Option<impl Iterator<Item = ConnectionAndAddress<Connection>> + '_> {
+        (!self.connection_map.is_empty()).then_some({
+            self.connection_map
+                .iter()
+                .choose_multiple(&mut rand::thread_rng(), amount)
+                .into_iter()
+                .map(move |item| {
+                    let (address, node) = (item.key(), item.value());
+                    let conn = node.get_connection(&conn_type);
+                    (address.clone(), conn)
+                })
+        })
     }
 
     pub(crate) fn replace_or_add_connection_for_address(
@@ -633,6 +635,7 @@ mod tests {
 
         let random_connections: HashSet<_> = container
             .random_connections(3, ConnectionType::User)
+            .expect("No connections found")
             .map(|pair| pair.1)
             .collect();
 
@@ -647,12 +650,9 @@
         let container = create_container();
         remove_all_connections(&container);
 
-        assert_eq!(
-            0,
-            container
-                .random_connections(1, ConnectionType::User)
-                .count()
-        );
+        assert!(container
+            .random_connections(1, ConnectionType::User)
+            .is_none());
     }
 
     #[test]
@@ -665,6 +665,7 @@
         );
         let random_connections: Vec<_> = container
             .random_connections(1, ConnectionType::User)
+            .expect("No connections found")
             .collect();
 
         assert_eq!(vec![(address, 4)], random_connections);
@@ -675,6 +676,7 @@
         let container = create_container();
         let mut random_connections: Vec<_> = container
             .random_connections(1000, ConnectionType::User)
+            .expect("No connections found")
             .map(|pair| pair.1)
             .collect();
         random_connections.sort();
@@ -687,6 +689,7 @@
         let container = create_container_with_strategy(ReadFromReplicaStrategy::RoundRobin, true);
         let mut random_connections: Vec<_> = container
             .random_connections(1000, ConnectionType::PreferManagement)
+            .expect("No connections found")
             .map(|pair| pair.1)
             .collect();
         random_connections.sort();
diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs
index c8628c16bb..aa9f02e1e6 100644
--- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs
+++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs
@@ -845,6 +845,7 @@ impl Future for Request {
             let request = this.request.as_mut().unwrap();
             // TODO - would be nice if we didn't need to repeat this code twice, with & without retries.
             if request.retry >= this.retry_params.number_of_retries {
+                let retry_method = err.retry_method();
                 let next = if err.kind() == ErrorKind::AllConnectionsUnavailable {
                     Next::ReconnectToInitialNodes { request: None }.into()
                 } else if matches!(err.retry_method(), crate::types::RetryMethod::MovedRedirect)
@@ -855,7 +856,9 @@ impl Future for Request {
                         sleep_duration: None,
                     }
                     .into()
-                } else if matches!(err.retry_method(), crate::types::RetryMethod::Reconnect) {
+                } else if matches!(retry_method, crate::types::RetryMethod::Reconnect)
+                    || matches!(retry_method, crate::types::RetryMethod::ReconnectAndRetry)
+                {
                     if let OperationTarget::Node { address } = target {
                         Next::Reconnect {
                             request: None,
@@ -934,13 +937,18 @@ impl Future for Request {
                     });
                     self.poll(cx)
                 }
-                crate::types::RetryMethod::Reconnect => {
+                crate::types::RetryMethod::Reconnect
+                | crate::types::RetryMethod::ReconnectAndRetry => {
                     let mut request = this.request.take().unwrap();
                     // TODO should we reset the redirect here?
                     request.info.reset_routing();
                     warn!("disconnected from {:?}", address);
+                    let should_retry = matches!(
+                        err.retry_method(),
+                        crate::types::RetryMethod::ReconnectAndRetry
+                    );
                     Next::Reconnect {
-                        request: Some(request),
+                        request: should_retry.then_some(request),
                         target: address,
                     }
                     .into()
@@ -1177,8 +1185,11 @@ where
         Ok(connections.0)
     }
 
-    fn reconnect_to_initial_nodes(&mut self) -> impl Future<Output = ()> {
-        let inner = self.inner.clone();
+    // Reconnect to the initial nodes provided by the user when the client was created,
+    // and try to refresh the slots based on the initial connections.
+    // Used when all cluster connections are unavailable.
+    fn reconnect_to_initial_nodes(inner: Arc<InnerCore<C>>) -> impl Future<Output = ()> {
+        let inner = inner.clone();
         async move {
             let connection_map = match Self::create_initial_connections(
                 &inner.initial_nodes,
@@ -1680,7 +1691,9 @@ where
             Self::refresh_slots_inner(inner, curr_retry)
                 .await
                 .map_err(|err| {
-                    if curr_retry > DEFAULT_NUMBER_OF_REFRESH_SLOTS_RETRIES {
+                    if curr_retry > DEFAULT_NUMBER_OF_REFRESH_SLOTS_RETRIES
+                        || err.kind() == ErrorKind::AllConnectionsUnavailable
+                    {
                         BackoffError::Permanent(err)
                     } else {
                         BackoffError::from(err)
@@ -2073,14 +2086,22 @@ where
             }
             ConnectionCheck::RandomConnection => {
                 let read_guard = core.conn_lock.read().await;
-                let (random_address, random_conn_future) = read_guard
+                read_guard
                     .random_connections(1, ConnectionType::User)
-                    .next()
-                    .ok_or(RedisError::from((
-                        ErrorKind::AllConnectionsUnavailable,
-                        "No random connection found",
-                    )))?;
-                return Ok((random_address, random_conn_future.await));
+                    .and_then(|mut random_connections| {
+                        random_connections.next().map(
+                            |(random_address, random_conn_future)| async move {
+                                (random_address, random_conn_future.await)
+                            },
+                        )
+                    })
+                    .ok_or_else(|| {
+                        RedisError::from((
+                            ErrorKind::AllConnectionsUnavailable,
+                            "No random connection found",
+                        ))
+                    })?
+                    .await
             }
         };
 
@@ -2104,10 +2125,19 @@ where
                 }
                 Err(err) => {
                     trace!("Recover slots failed!");
-                    *future = Box::pin(Self::refresh_slots_and_subscriptions_with_retries(
-                        self.inner.clone(),
-                        &RefreshPolicy::Throttable,
-                    ));
+                    let next_state = if err.kind() == ErrorKind::AllConnectionsUnavailable {
+                        ConnectionState::Recover(RecoverFuture::Reconnect(Box::pin(
+                            ClusterConnInner::reconnect_to_initial_nodes(self.inner.clone()),
+                        )))
+                    } else {
+                        ConnectionState::Recover(RecoverFuture::RecoverSlots(Box::pin(
+                            Self::refresh_slots_and_subscriptions_with_retries(
+                                self.inner.clone(),
+                                &RefreshPolicy::Throttable,
+                            ),
+                        )))
+                    };
+                    self.state = next_state;
                     Poll::Ready(Err(err))
                 }
             },
@@ -2226,9 +2256,7 @@ where
                     }));
                 }
             }
-            Next::Reconnect {
-                request, target, ..
-            } => {
+            Next::Reconnect { request, target } => {
                 poll_flush_action =
                     poll_flush_action.change_state(PollFlushAction::Reconnect(vec![target]));
                 if let Some(request) = request {
@@ -2371,7 +2399,7 @@ where
             }
             PollFlushAction::ReconnectFromInitialConnections => {
                 self.state = ConnectionState::Recover(RecoverFuture::Reconnect(Box::pin(
-                    self.reconnect_to_initial_nodes(),
+                    ClusterConnInner::reconnect_to_initial_nodes(self.inner.clone()),
                 )));
             }
         }
@@ -2413,8 +2441,19 @@ async fn calculate_topology_from_random_nodes<'a, C>(
 where
     C: ConnectionLike + Connect + Clone + Send + Sync + 'static,
 {
-    let requested_nodes =
-        read_guard.random_connections(num_of_nodes_to_query, ConnectionType::PreferManagement);
+    let requested_nodes = if let Some(random_conns) =
+        read_guard.random_connections(num_of_nodes_to_query, ConnectionType::PreferManagement)
+    {
+        random_conns
+    } else {
+        return (
+            Err(RedisError::from((
+                ErrorKind::AllConnectionsUnavailable,
+                "No available connections to refresh slots from",
+            ))),
+            vec![],
+        );
+    };
     let topology_join_results =
         futures::future::join_all(requested_nodes.map(|(addr, conn)| async move {
             let mut conn: C = conn.await;
diff --git a/glide-core/redis-rs/redis/src/types.rs b/glide-core/redis-rs/redis/src/types.rs
index a024f16a7d..b47eedd13b 100644
--- a/glide-core/redis-rs/redis/src/types.rs
+++ b/glide-core/redis-rs/redis/src/types.rs
@@ -118,6 +118,8 @@ pub enum ErrorKind {
     /// not native to the system. This is usually the case if
     /// the cause is another error.
    IoError,
+    /// An I/O error that is considered safe to retry as the request was not received by the server.
+    IoErrorRetrySafe,
     /// An error raised that was identified on the client before execution.
     ClientError,
     /// An extension error. This is an error created by the server
@@ -802,6 +804,7 @@ impl fmt::Debug for RedisError {
 
 pub(crate) enum RetryMethod {
     Reconnect,
+    ReconnectAndRetry,
     NoRetry,
     RetryImmediately,
     WaitAndRetry,
@@ -870,6 +873,7 @@ impl RedisError {
             ErrorKind::CrossSlot => "cross-slot",
             ErrorKind::MasterDown => "master down",
             ErrorKind::IoError => "I/O error",
+            ErrorKind::IoErrorRetrySafe => "I/O error - Request wasn't received by the server",
             ErrorKind::ExtensionError => "extension error",
             ErrorKind::ClientError => "client error",
             ErrorKind::ReadOnly => "read-only",
@@ -957,6 +961,7 @@ impl RedisError {
     pub fn is_unrecoverable_error(&self) -> bool {
         match self.retry_method() {
             RetryMethod::Reconnect => true,
+            RetryMethod::ReconnectAndRetry => true,
 
             RetryMethod::NoRetry => false,
             RetryMethod::RetryImmediately => false,
@@ -1064,12 +1069,14 @@ impl RedisError {
                     io::ErrorKind::PermissionDenied => RetryMethod::NoRetry,
                     io::ErrorKind::Unsupported => RetryMethod::NoRetry,
+                    io::ErrorKind::TimedOut => RetryMethod::NoRetry,
                     _ => RetryMethod::RetryImmediately,
                 },
                 _ => RetryMethod::RetryImmediately,
             },
 
             ErrorKind::NotAllSlotsCovered => RetryMethod::NoRetry,
+            ErrorKind::IoErrorRetrySafe => RetryMethod::ReconnectAndRetry,
         }
     }
 }
diff --git a/glide-core/redis-rs/redis/tests/test_async.rs b/glide-core/redis-rs/redis/tests/test_async.rs
index d16f1e0694..3599153f62 100644
--- a/glide-core/redis-rs/redis/tests/test_async.rs
+++ b/glide-core/redis-rs/redis/tests/test_async.rs
@@ -569,7 +569,7 @@ mod basic_async {
                 Err(err) => break err,
             }
         };
-        assert_eq!(err.kind(), ErrorKind::IoError); // Shouldn't this be IoError?
+        assert_eq!(err.kind(), ErrorKind::IoErrorRetrySafe);
     }
 
     #[tokio::test]
diff --git a/glide-core/redis-rs/redis/tests/test_cluster_async.rs b/glide-core/redis-rs/redis/tests/test_cluster_async.rs
index e6a5984fa7..6e2a598548 100644
--- a/glide-core/redis-rs/redis/tests/test_cluster_async.rs
+++ b/glide-core/redis-rs/redis/tests/test_cluster_async.rs
@@ -1015,7 +1015,6 @@ mod cluster_async {
             let sleep_duration = core::time::Duration::from_millis(100);
             #[cfg(feature = "tokio-comp")]
             tokio::time::sleep(sleep_duration).await;
-
         }
     }
     panic!("Failed to reach to the expected topology refresh retries. Found={refreshed_calls}, Expected={expected_calls}")
@@ -2542,8 +2541,8 @@
             match port {
                 6380 => panic!("Node should not be called"),
                 _ => match completed.fetch_add(1, Ordering::SeqCst) {
-                    0..=1 => Err(Err(RedisError::from(std::io::Error::new(
-                        std::io::ErrorKind::ConnectionReset,
+                    0..=1 => Err(Err(RedisError::from((
+                        ErrorKind::IoErrorRetrySafe,
                         "mock-io-error",
                     )))),
                     _ => Err(Ok(Value::BulkString(b"123".to_vec()))),
@@ -2598,6 +2597,81 @@
         assert_eq!(completed.load(Ordering::SeqCst), 1);
     }
 
+    #[test]
+    #[serial_test::serial]
+    fn test_async_cluster_non_retryable_io_error_should_not_retry() {
+        let name = "test_async_cluster_non_retryable_io_error_should_not_retry";
+        let requests = atomic::AtomicUsize::new(0);
+        let MockEnv {
+            runtime,
+            async_connection: mut connection,
+            ..
+        } = MockEnv::with_client_builder(
+            ClusterClient::builder(vec![&*format!("redis://{name}")]).retries(3),
+            name,
+            move |cmd: &[u8], _port| {
+                respond_startup_two_nodes(name, cmd)?;
+                let i = requests.fetch_add(1, atomic::Ordering::SeqCst);
+                match i {
+                    0 => Err(Err(RedisError::from((ErrorKind::IoError, "io-error")))),
+                    _ => {
+                        panic!("Expected not to be retried!")
+                    }
+                }
+            },
+        );
+        runtime
+            .block_on(async move {
+                let res = cmd("INCR")
+                    .arg("foo")
+                    .query_async::<_, Option<i32>>(&mut connection)
+                    .await;
+                assert!(res.is_err());
+                let err = res.unwrap_err();
+                assert!(err.is_io_error());
+                Ok::<_, RedisError>(())
+            })
+            .unwrap();
+    }
+
+    #[test]
+    #[serial_test::serial]
+    fn test_async_cluster_retry_safe_io_error_should_be_retried() {
+        let name = "test_async_cluster_retry_safe_io_error_should_be_retried";
+        let requests = atomic::AtomicUsize::new(0);
+        let MockEnv {
+            runtime,
+            async_connection: mut connection,
+            ..
+        } = MockEnv::with_client_builder(
+            ClusterClient::builder(vec![&*format!("redis://{name}")]).retries(3),
+            name,
+            move |cmd: &[u8], _port| {
+                respond_startup_two_nodes(name, cmd)?;
+                let i = requests.fetch_add(1, atomic::Ordering::SeqCst);
+                match i {
+                    0 => Err(Err(RedisError::from((
+                        ErrorKind::IoErrorRetrySafe,
+                        "server didn't receive the request, safe to retry",
+                    )))),
+                    _ => Err(Ok(Value::Int(1))),
+                }
+            },
+        );
+        runtime
+            .block_on(async move {
+                let res = cmd("INCR")
+                    .arg("foo")
+                    .query_async::<_, i32>(&mut connection)
+                    .await;
+                assert!(res.is_ok());
+                let value = res.unwrap();
+                assert_eq!(value, 1);
+                Ok::<_, RedisError>(())
+            })
+            .unwrap();
+    }
+
     #[test]
     #[serial_test::serial]
     fn test_async_cluster_read_from_primary() {
@@ -3186,10 +3260,17 @@
         };
         // wait for new topology discovery
+        let max_requests = 5;
+        let mut i = 0;
+        let mut cmd = redis::cmd("INFO");
+        cmd.arg("SERVER");
         loop {
-            let mut cmd = redis::cmd("INFO");
-            cmd.arg("SERVER");
-            let res = publishing_con
+            if i == max_requests {
+                panic!("Failed to recover and discover new topology");
+            }
+            i += 1;
+
+            if let Ok(res) = publishing_con
                 .route_command(
                     &cmd,
                     RoutingInfo::SingleNode(SingleNodeRoutingInfo::SpecificNode(Route::new(
                         0,
                         SlotAddr::Master,
                     ))),
                 )
-                .await;
-            assert!(res.is_ok());
-            let res = res.unwrap();
-            match res {
-                Value::VerbatimString { format: _, text } => {
-                    if text.contains(format!("tcp_port:{}", last_server_port).as_str()) {
-                        // new topology rediscovered
-                        break;
+                .await
+            {
+                match res {
+                    Value::VerbatimString { format: _, text } => {
+                        if text.contains(format!("tcp_port:{}", last_server_port).as_str()) {
+                            // new topology rediscovered
+                            break;
+                        }
+                    }
+                    _ => {
+                        panic!("Wrong return type for INFO SERVER command: {:?}", res);
+                    }
                 }
-            }
-            _ => {
-                panic!("Wrong return type for INFO SERVER command: {:?}", res);
-            }
+                sleep(futures_time::time::Duration::from_secs(1)).await;
             }
-            sleep(futures_time::time::Duration::from_secs(1)).await;
         }
 
         // sleep for one one cycle of topology refresh
@@ -3250,7 +3331,7 @@
 
         if use_sharded {
             // validate SPUBLISH
-            let result = cmd("SPUBLISH")
+            let result = redis::cmd("SPUBLISH")
                 .arg("test_channel_?")
                 .arg("test_message")
                 .query_async(&mut publishing_con)
@@ -3757,9 +3838,26 @@
                 false,
             );
 
-            let result = connection.req_packed_command(&cmd).await.unwrap();
-            assert_eq!(result, Value::SimpleString("PONG".to_string()));
-            Ok::<_, RedisError>(())
+            let max_requests = 5;
+            let mut i = 0;
+            let mut last_err = None;
+            loop {
+                if i == max_requests {
+                    break;
+                }
+                i += 1;
+                match connection.req_packed_command(&cmd).await {
+                    Ok(result) => {
+                        assert_eq!(result, Value::SimpleString("PONG".to_string()));
+                        return Ok::<_, RedisError>(());
+                    }
+                    Err(err) => {
+                        last_err = Some(err);
+                        let _ = sleep(futures_time::time::Duration::from_secs(1)).await;
+                    }
+                }
+            }
+            panic!("Failed to recover after all nodes went down. Last error: {last_err:?}");
         })
         .unwrap();
     }
@@ -3786,19 +3884,37 @@
             );
             let cmd = cmd("PING");
 
-            // explicitly route to all primaries and request all succeeded
-            let result = connection
-                .route_command(
-                    &cmd,
-                    RoutingInfo::MultiNode((
-                        MultipleNodeRoutingInfo::AllMasters,
-                        Some(redis::cluster_routing::ResponsePolicy::AllSucceeded),
-                    )),
-                )
-                .await;
-            assert!(result.is_ok());
-            Ok::<_, RedisError>(())
+            let max_requests = 5;
+            let mut i = 0;
+            let mut last_err = None;
+            loop {
+                if i == max_requests {
+                    break;
+                }
+                i += 1;
+                // explicitly route to all primaries and request all succeeded
+                match connection
+                    .route_command(
+                        &cmd,
+                        RoutingInfo::MultiNode((
+                            MultipleNodeRoutingInfo::AllMasters,
+                            Some(redis::cluster_routing::ResponsePolicy::AllSucceeded),
+                        )),
+                    )
+                    .await
+                {
+                    Ok(result) => {
+                        assert_eq!(result, Value::SimpleString("PONG".to_string()));
+                        return Ok::<_, RedisError>(());
+                    }
+                    Err(err) => {
+                        last_err = Some(err);
+                        let _ = sleep(futures_time::time::Duration::from_secs(1)).await;
+                    }
+                }
+            }
+            panic!("Failed to recover after all nodes went down. Last error: {last_err:?}");
         })
         .unwrap();
     }
@@ -3871,7 +3987,10 @@
                 if connect_attempt > 5 {
                     panic!("Too many pings!");
                 }
-                Err(Err(broken_pipe_error()))
+                Err(Err(RedisError::from((
+                    ErrorKind::IoErrorRetrySafe,
+                    "mock-io-error",
+                ))))
             } else {
                 respond_startup_two_nodes(name, cmd)?;
                 let past_get_attempts = get_attempts.fetch_add(1, Ordering::Relaxed);
@@ -3879,7 +3998,10 @@
                 if past_get_attempts == 0 {
                     // Error once with io-error, ensure connection is reestablished w/out calling
                    // other node (i.e., not doing a full slot rebuild)
-                    Err(Err(broken_pipe_error()))
+                    Err(Err(RedisError::from((
+                        ErrorKind::IoErrorRetrySafe,
+                        "mock-io-error",
+                    ))))
                 } else {
                     Err(Ok(Value::BulkString(b"123".to_vec())))
                 }
@@ -3931,7 +4053,7 @@
             .expect("Failed executing CLIENT LIST");
         let mut client_list_parts = client_list.split('\n');
         if client_list_parts
-            .any(|line| line.contains(MANAGEMENT_CONN_NAME) && line.contains("cmd=cluster"))
+            .any(|line| line.contains(MANAGEMENT_CONN_NAME) && line.contains("cmd=cluster")) && client_list.matches(MANAGEMENT_CONN_NAME).count() == 1
         {
             return Ok::<_, RedisError>(());
         }
@@ -3983,21 +4105,23 @@
     }
 
     async fn kill_connection(killer_connection: &mut ClusterConnection, connection_to_kill: &str) {
+        let default_routing = RoutingInfo::SingleNode(SingleNodeRoutingInfo::SpecificNode(
+            Route::new(0, SlotAddr::Master),
+        ));
+        kill_connection_with_routing(killer_connection, connection_to_kill, default_routing).await;
+    }
+
+    async fn kill_connection_with_routing(
+        killer_connection: &mut ClusterConnection,
+        connection_to_kill: &str,
+        routing: RoutingInfo,
+    ) {
         let mut cmd = redis::cmd("CLIENT");
         cmd.arg("KILL");
         cmd.arg("ID");
         cmd.arg(connection_to_kill);
-        // Kill the management connection in the primary node that holds slot 0
-        assert!(killer_connection
-            .route_command(
-                &cmd,
-                RoutingInfo::SingleNode(SingleNodeRoutingInfo::SpecificNode(Route::new(
-                    0,
-                    SlotAddr::Master,
-                )),),
-            )
-            .await
-            .is_ok());
+        // Kill the management connection for the routing node
+        assert!(killer_connection.route_command(&cmd, routing).await.is_ok());
     }
 
     #[test]