diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 96ac8fe2cbef..941cc0239d2b 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -2803,11 +2803,11 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
 /// * perform the iteration in-place on the original allocation backing the iterator
 ///
 /// The last case warrants some attention. It is an optimization that in many cases reduces peak memory
-/// consumption and improves cache locality. But when a large number of big, short-lived
-/// allocations are created, only a small fraction of their items gets collected, no further use
-/// is made of the spare capacity and the resulting `Vec` is moved into a longer-lived structure
-/// this can lead to the large allocations having their lifetimes unnecessarily extended which
-/// can result in increased memory footprint.
+/// consumption and improves cache locality. But when big, short-lived allocations are created,
+/// only a small fraction of their items gets collected, no further use is made of the spare capacity
+/// and the resulting `Vec` is moved into a longer-lived structure, this can lead to the large
+/// allocations having their lifetimes unnecessarily extended which can result in increased memory
+/// footprint.
 ///
 /// In cases where this is an issue, the excess capacity can be discarded with [`Vec::shrink_to()`],
 /// [`Vec::shrink_to_fit()`] or by collecting into [`Box<[T]>`][owned slice] instead, which additionally reduces
@@ -2819,8 +2819,7 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
 /// # use std::sync::Mutex;
 /// static LONG_LIVED: Mutex<Vec<Vec<u16>>> = Mutex::new(Vec::new());
 ///
-/// // many short-lived allocations
-/// for i in 0..100 {
+/// for i in 0..10 {
 ///     let big_temporary: Vec<u16> = (0..1024).collect();
 ///     // discard most items
 ///     let mut result: Vec<_> = big_temporary.into_iter().filter(|i| i % 100 == 0).collect();