From 9be91315e6968045178488d82e216fc33c03a4d5 Mon Sep 17 00:00:00 2001
From: Josh Stone
Date: Thu, 7 Nov 2024 13:48:42 -0800
Subject: [PATCH] channel: Create `list::Block` directly on the heap

The list channel's `Block::new` was causing a stack overflow because it
held 32 item slots, instantiated on the stack before moving to
`Box::new`. The 32x multiplier made modestly-large item sizes untenable.

That block is now initialized directly on the heap.

References from the `std` channel implementation:
* https://github.com/rust-lang/rust/issues/102246
* https://github.com/rust-lang/rust/pull/132738
---
 crossbeam-channel/src/flavors/list.rs | 25 +++++++++++++------------
 crossbeam-channel/tests/list.rs       | 15 +++++++++++++++
 2 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/crossbeam-channel/src/flavors/list.rs b/crossbeam-channel/src/flavors/list.rs
index e86551ad2..72f81fee1 100644
--- a/crossbeam-channel/src/flavors/list.rs
+++ b/crossbeam-channel/src/flavors/list.rs
@@ -1,5 +1,6 @@
 //! Unbounded channel implemented as a linked list.
 
+use std::alloc::{alloc_zeroed, Layout};
 use std::boxed::Box;
 use std::cell::UnsafeCell;
 use std::marker::PhantomData;
@@ -50,11 +51,6 @@ struct Slot<T> {
 }
 
 impl<T> Slot<T> {
-    const UNINIT: Self = Self {
-        msg: UnsafeCell::new(MaybeUninit::uninit()),
-        state: AtomicUsize::new(0),
-    };
-
     /// Waits until a message is written into the slot.
     fn wait_write(&self) {
         let backoff = Backoff::new();
@@ -77,11 +73,16 @@ struct Block<T> {
 
 impl<T> Block<T> {
     /// Creates an empty block.
-    fn new() -> Self {
-        Self {
-            next: AtomicPtr::new(ptr::null_mut()),
-            slots: [Slot::UNINIT; BLOCK_CAP],
-        }
+    fn new() -> Box<Self> {
+        // SAFETY: This is safe because:
+        //  [1] `Block::next` (AtomicPtr) may be safely zero initialized.
+        //  [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
+        //  [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
+        //       holds a MaybeUninit.
+        //  [4] `Slot::state` (AtomicUsize) may be safely zero initialized.
+        // TODO: unsafe { Box::new_zeroed().assume_init() }
+        let layout = Layout::new::<Self>();
+        unsafe { Box::from_raw(alloc_zeroed(layout).cast()) }
     }
 
     /// Waits until the next pointer is set.
@@ -223,13 +224,13 @@ impl<T> Channel<T> {
             // If we're going to have to install the next block, allocate it in advance in order to
             // make the wait for other threads as short as possible.
             if offset + 1 == BLOCK_CAP && next_block.is_none() {
-                next_block = Some(Box::new(Block::<T>::new()));
+                next_block = Some(Block::<T>::new());
             }
 
             // If this is the first message to be sent into the channel, we need to allocate the
             // first block and install it.
             if block.is_null() {
-                let new = Box::into_raw(Box::new(Block::<T>::new()));
+                let new = Box::into_raw(Block::<T>::new());
 
                 if self
                     .tail
diff --git a/crossbeam-channel/tests/list.rs b/crossbeam-channel/tests/list.rs
index ebe6f6f85..beabac8f2 100644
--- a/crossbeam-channel/tests/list.rs
+++ b/crossbeam-channel/tests/list.rs
@@ -580,3 +580,18 @@ fn channel_through_channel() {
     })
     .unwrap();
 }
+
+// If `Block` is created on the stack, the array of slots will multiply this `BigStruct` and
+// probably overflow the thread stack. It's now directly created on the heap to avoid this.
+#[test]
+fn stack_overflow() {
+    const N: usize = 32_768;
+    struct BigStruct {
+        _data: [u8; N],
+    }
+
+    let (sender, receiver) = unbounded::<BigStruct>();
+    sender.send(BigStruct { _data: [0u8; N] }).unwrap();
+
+    for _data in receiver.try_iter() {}
+}
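
Note for reviewers: the sketch below is a stand-alone illustration of the same
zeroed-heap-allocation pattern, not part of the patch. The `Big` type, its field
names, and the 1 MiB payload size are made up for illustration; only the
`std::alloc` calls mirror what `Block::new` does, and unlike the patch the sketch
also checks the allocation result with `handle_alloc_error` before constructing
the `Box`.

    use std::alloc::{alloc_zeroed, handle_alloc_error, Layout};
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Hypothetical type that is too large to build on the stack first.
    struct Big {
        counter: AtomicUsize,   // an all-zero `AtomicUsize` is a valid initial state
        payload: [u8; 1 << 20], // 1 MiB of storage where all-zero bytes are valid
    }

    impl Big {
        fn new_boxed() -> Box<Self> {
            // Allocate zeroed memory directly on the heap so the value never
            // exists on the stack.
            let layout = Layout::new::<Self>();
            let ptr = unsafe { alloc_zeroed(layout) };
            if ptr.is_null() {
                handle_alloc_error(layout);
            }
            // SAFETY: `ptr` is non-null, was allocated with the layout of `Self`,
            // and an all-zero bit pattern is a valid `Self`.
            unsafe { Box::from_raw(ptr.cast::<Self>()) }
        }
    }

    fn main() {
        let big = Big::new_boxed();
        assert_eq!(big.counter.load(Ordering::Relaxed), 0);
        assert!(big.payload.iter().all(|&b| b == 0));
    }

Zero initialization works here because an all-zero bit pattern is a valid value
for every field, which is exactly the property the SAFETY comment in
`Block::new` relies on for `AtomicPtr`, `AtomicUsize`, and the
`MaybeUninit`-backed slots.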