diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index ff942444a6176..2abea095c8929 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -77,7 +77,7 @@ use core::atomic;
 use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
 use core::fmt;
 use core::cmp::Ordering;
-use core::mem::{min_align_of_val, size_of_val};
+use core::mem::{align_of_val, size_of_val};
 use core::intrinsics::drop_in_place;
 use core::mem;
 use core::nonzero::NonZero;
@@ -230,7 +230,7 @@ impl<T: ?Sized> Arc<T> {
 
         if self.inner().weak.fetch_sub(1, Release) == 1 {
             atomic::fence(Acquire);
-            deallocate(ptr as *mut u8, size_of_val(&*ptr), min_align_of_val(&*ptr))
+            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
         }
     }
 }
@@ -533,7 +533,7 @@ impl<T: ?Sized> Drop for Weak<T> {
 
             atomic::fence(Acquire);
             unsafe { deallocate(ptr as *mut u8, size_of_val(&*ptr),
-                                min_align_of_val(&*ptr)) }
+                                align_of_val(&*ptr)) }
         }
     }
 }
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 88c5c38172aca..090fc125af471 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -160,7 +160,7 @@ use core::default::Default;
 use core::fmt;
 use core::hash::{Hasher, Hash};
 use core::marker::{self, Sized};
-use core::mem::{self, min_align_of, size_of, forget};
+use core::mem::{self, align_of, size_of, forget};
 use core::nonzero::NonZero;
 use core::ops::{Deref, Drop};
 use core::option::Option;
@@ -175,7 +175,7 @@ use core::intrinsics::drop_in_place;
 #[cfg(not(stage0))]
 use core::marker::Unsize;
 #[cfg(not(stage0))]
-use core::mem::{min_align_of_val, size_of_val};
+use core::mem::{align_of_val, size_of_val};
 #[cfg(not(stage0))]
 use core::ops::CoerceUnsized;
 
@@ -369,7 +369,7 @@ pub fn try_unwrap<T>(rc: Rc<T>) -> Result<T, Rc<T>> {
             // destruct the box and skip our Drop
            // we can ignore the refcounts because we know we're unique
             deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
-                       min_align_of::<RcBox<T>>());
+                       align_of::<RcBox<T>>());
             forget(rc);
             Ok(val)
         }
@@ -502,7 +502,7 @@ impl<T> Drop for Rc<T> {
 
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
-                               min_align_of::<RcBox<T>>())
+                               align_of::<RcBox<T>>())
                 }
             }
         }
@@ -556,7 +556,7 @@ impl<T: ?Sized> Drop for Rc<T> {
 
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of_val(&*ptr),
-                               min_align_of_val(&*ptr))
+                               align_of_val(&*ptr))
                 }
             }
         }
@@ -1011,7 +1011,7 @@ impl<T> Drop for Weak<T> {
                 // the strong pointers have disappeared.
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
-                               min_align_of::<RcBox<T>>())
+                               align_of::<RcBox<T>>())
                 }
             }
         }
@@ -1057,7 +1057,7 @@ impl<T: ?Sized> Drop for Weak<T> {
                 // the strong pointers have disappeared.
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of_val(&*ptr),
-                               min_align_of_val(&*ptr))
+                               align_of_val(&*ptr))
                 }
             }
         }
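Note (illustration, not part of the patch): the `Rc`/`Arc` destructors above free a possibly-unsized inner allocation, so they query the size and alignment of the *value* (`size_of_val`/`align_of_val`) rather than of a statically known type. A minimal sketch of that distinction, using only stable `std::mem` functions:

```rust
use std::mem::{align_of_val, size_of_val};

fn main() {
    // For a sized value, the value-based queries agree with the type-based ones.
    let x: u64 = 0;
    assert_eq!(size_of_val(&x), std::mem::size_of::<u64>());

    // For an unsized value (a slice here), only the value-based queries apply;
    // this is the same reason the destructors above pass `size_of_val(&*ptr)`
    // and `align_of_val(&*ptr)` to `deallocate`.
    let xs: &[u64] = &[1, 2, 3];
    assert_eq!(size_of_val(xs), 3 * std::mem::size_of::<u64>());
    assert_eq!(align_of_val(xs), align_of_val(&x));
}
```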
diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs
index 5817cee24dc41..dcf3f1f8b5d2a 100644
--- a/src/libarena/lib.rs
+++ b/src/libarena/lib.rs
@@ -241,7 +241,7 @@ impl<'longer_than_self> Arena<'longer_than_self> {
     fn alloc_copy<T, F>(&self, op: F) -> &mut T where F: FnOnce() -> T {
         unsafe {
             let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
-                                            mem::min_align_of::<T>());
+                                            mem::align_of::<T>());
             let ptr = ptr as *mut T;
             ptr::write(&mut (*ptr), op());
             return &mut *ptr;
@@ -297,7 +297,7 @@ impl<'longer_than_self> Arena<'longer_than_self> {
             let tydesc = get_tydesc::<T>();
             let (ty_ptr, ptr) =
                 self.alloc_noncopy_inner(mem::size_of::<T>(),
-                                         mem::min_align_of::<T>());
+                                         mem::align_of::<T>());
             let ty_ptr = ty_ptr as *mut usize;
             let ptr = ptr as *mut T;
             // Write in our tydesc along with a bit indicating that it
@@ -390,7 +390,7 @@ struct TypedArenaChunk<T> {
 
 fn calculate_size<T>(capacity: usize) -> usize {
     let mut size = mem::size_of::<TypedArenaChunk<T>>();
-    size = round_up(size, mem::min_align_of::<T>());
+    size = round_up(size, mem::align_of::<T>());
     let elem_size = mem::size_of::<T>();
     let elems_size = elem_size.checked_mul(capacity).unwrap();
     size = size.checked_add(elems_size).unwrap();
@@ -402,7 +402,7 @@ impl<T> TypedArenaChunk<T> {
     unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: usize)
                   -> *mut TypedArenaChunk<T> {
         let size = calculate_size::<T>(capacity);
-        let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
+        let chunk = allocate(size, mem::align_of::<TypedArenaChunk<T>>())
                     as *mut TypedArenaChunk<T>;
         if chunk.is_null() { alloc::oom() }
         (*chunk).next = next;
@@ -428,7 +428,7 @@ impl<T> TypedArenaChunk<T> {
         let size = calculate_size::<T>(self.capacity);
         let self_ptr: *mut TypedArenaChunk<T> = self;
         deallocate(self_ptr as *mut u8, size,
-                   mem::min_align_of::<TypedArenaChunk<T>>());
+                   mem::align_of::<TypedArenaChunk<T>>());
         if !next.is_null() {
             let capacity = (*next).capacity;
             (*next).destroy(capacity);
@@ -441,7 +441,7 @@ impl<T> TypedArenaChunk<T> {
         let this: *const TypedArenaChunk<T> = self;
         unsafe {
             mem::transmute(round_up(this.offset(1) as usize,
-                                    mem::min_align_of::<T>()))
+                                    mem::align_of::<T>()))
         }
     }
 
diff --git a/src/libcollections/btree/node.rs b/src/libcollections/btree/node.rs
index 4f3c3b0826342..54a9181d2df5b 100644
--- a/src/libcollections/btree/node.rs
+++ b/src/libcollections/btree/node.rs
@@ -164,12 +164,12 @@ fn test_offset_calculation() {
 }
 
 fn calculate_allocation_generic<K, V>(capacity: usize, is_leaf: bool) -> (usize, usize) {
-    let (keys_size, keys_align) = (capacity * mem::size_of::<K>(), mem::min_align_of::<K>());
-    let (vals_size, vals_align) = (capacity * mem::size_of::<V>(), mem::min_align_of::<V>());
+    let (keys_size, keys_align) = (capacity * mem::size_of::<K>(), mem::align_of::<K>());
+    let (vals_size, vals_align) = (capacity * mem::size_of::<V>(), mem::align_of::<V>());
     let (edges_size, edges_align) = if is_leaf {
         (0, 1)
     } else {
-        ((capacity + 1) * mem::size_of::<Node<K, V>>(), mem::min_align_of::<Node<K, V>>())
+        ((capacity + 1) * mem::size_of::<Node<K, V>>(), mem::align_of::<Node<K, V>>())
     };
 
     calculate_allocation(
@@ -182,11 +182,11 @@ fn calculate_offsets_generic<K, V>(capacity: usize, is_leaf: bool) -> (usize, usize) {
     let keys_size = capacity * mem::size_of::<K>();
     let vals_size = capacity * mem::size_of::<V>();
-    let vals_align = mem::min_align_of::<V>();
+    let vals_align = mem::align_of::<V>();
     let edges_align = if is_leaf {
         1
     } else {
-        mem::min_align_of::<Node<K, V>>()
+        mem::align_of::<Node<K, V>>()
     };
 
     calculate_offsets(
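Note (illustration, not part of the patch): the arena and btree call sites above all feed `mem::align_of` into offset and size arithmetic. A rough sketch of that round-up-to-alignment calculation; the header size and element type below are made up for the example:

```rust
use std::mem::{align_of, size_of};

// Round `n` up to the next multiple of `align` (a power of two), in the spirit
// of the `round_up` helper used by TypedArenaChunk above.
fn round_up(n: usize, align: usize) -> usize {
    (n + align - 1) & !(align - 1)
}

fn main() {
    type Elem = u32;                 // hypothetical element type
    let header_size = 16usize;       // hypothetical chunk-header size
    let capacity = 10usize;

    // Elements start at the first properly aligned offset past the header.
    let elems_start = round_up(header_size, align_of::<Elem>());
    let total = elems_start + capacity * size_of::<Elem>();

    assert_eq!(elems_start % align_of::<Elem>(), 0);
    println!("chunk needs {} bytes", total);
}
```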
diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs
index d3315758df04b..f13e9115b111b 100644
--- a/src/libcollections/vec.rs
+++ b/src/libcollections/vec.rs
@@ -221,7 +221,7 @@ impl<T> Vec<T> {
         } else {
             let size = capacity.checked_mul(mem::size_of::<T>())
                                .expect("capacity overflow");
-            let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
+            let ptr = unsafe { allocate(size, mem::align_of::<T>()) };
             if ptr.is_null() { ::alloc::oom() }
             unsafe { Vec::from_raw_parts(ptr as *mut T, 0, capacity) }
         }
@@ -394,7 +394,7 @@ impl<T> Vec<T> {
                 let ptr = reallocate(*self.ptr as *mut u8,
                                      self.cap * mem::size_of::<T>(),
                                      self.len * mem::size_of::<T>(),
-                                     mem::min_align_of::<T>()) as *mut T;
+                                     mem::align_of::<T>()) as *mut T;
                 if ptr.is_null() { ::alloc::oom() }
                 self.ptr = Unique::new(ptr);
             }
@@ -865,9 +865,9 @@ impl<T> Vec<T> {
 
         // FIXME: Assert statically that the types `T` and `U` have the
         // same minimal alignment in case they are not zero-sized.
-        // These asserts are necessary because the `min_align_of` of the
+        // These asserts are necessary because the `align_of` of the
         // types are passed to the allocator by `Vec`.
-        assert!(mem::min_align_of::<T>() == mem::min_align_of::<U>());
+        assert!(mem::align_of::<T>() == mem::align_of::<U>());
 
         // This `as isize` cast is safe, because the size of the elements of the
         // vector is not 0, and:
@@ -1268,9 +1268,9 @@ impl<T> Vec<T> {
 
 #[inline(never)]
 unsafe fn alloc_or_realloc<T>(ptr: *mut T, old_size: usize, size: usize) -> *mut T {
     if old_size == 0 {
-        allocate(size, mem::min_align_of::<T>()) as *mut T
+        allocate(size, mem::align_of::<T>()) as *mut T
     } else {
-        reallocate(ptr as *mut u8, old_size, size, mem::min_align_of::<T>()) as *mut T
+        reallocate(ptr as *mut u8, old_size, size, mem::align_of::<T>()) as *mut T
     }
 }
@@ -1279,7 +1279,7 @@ unsafe fn dealloc<T>(ptr: *mut T, len: usize) {
     if mem::size_of::<T>() != 0 {
         deallocate(ptr as *mut u8,
                    len * mem::size_of::<T>(),
-                   mem::min_align_of::<T>())
+                   mem::align_of::<T>())
     }
 }
 
diff --git a/src/libcollections/vec_deque.rs b/src/libcollections/vec_deque.rs
index f70906f84b869..cf04ec3fe56be 100644
--- a/src/libcollections/vec_deque.rs
+++ b/src/libcollections/vec_deque.rs
@@ -67,7 +67,7 @@ impl<T> Drop for VecDeque<T> {
             if mem::size_of::<T>() != 0 {
                 heap::deallocate(*self.ptr as *mut u8,
                                  self.cap * mem::size_of::<T>(),
-                                 mem::min_align_of::<T>())
+                                 mem::align_of::<T>())
             }
         }
     }
@@ -172,7 +172,7 @@ impl<T> VecDeque<T> {
 
         let ptr = unsafe {
             if mem::size_of::<T>() != 0 {
-                let ptr = heap::allocate(size, mem::min_align_of::<T>()) as *mut T;;
+                let ptr = heap::allocate(size, mem::align_of::<T>()) as *mut T;;
                 if ptr.is_null() { ::alloc::oom() }
                 Unique::new(ptr)
             } else {
@@ -344,7 +344,7 @@ impl<T> VecDeque<T> {
                 let ptr = heap::reallocate(*self.ptr as *mut u8,
                                            old,
                                            new,
-                                           mem::min_align_of::<T>()) as *mut T;
+                                           mem::align_of::<T>()) as *mut T;
                 if ptr.is_null() { ::alloc::oom() }
                 self.ptr = Unique::new(ptr);
             }
@@ -464,7 +464,7 @@ impl<T> VecDeque<T> {
                 let ptr = heap::reallocate(*self.ptr as *mut u8,
                                            old,
                                            new_size,
-                                           mem::min_align_of::<T>()) as *mut T;
+                                           mem::align_of::<T>()) as *mut T;
                 if ptr.is_null() { ::alloc::oom() }
                 self.ptr = Unique::new(ptr);
             }
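Note (illustration, not part of the patch): the `assert!` in the vec.rs hunk at line 865 exists because the alignment passed to `allocate` must match the one later passed to `deallocate`, so reusing a `Vec<T>` buffer for values of type `U` is only sound if the two alignments agree. A small sketch of that check:

```rust
use std::mem::align_of;

// Sketch of the invariant behind the assert above: a buffer allocated with
// T's alignment may only be freed (or reused) with an equal alignment.
fn same_alignment<T, U>() -> bool {
    align_of::<T>() == align_of::<U>()
}

fn main() {
    assert!(same_alignment::<u32, i32>());   // identical layout: ok
    assert!(!same_alignment::<u8, u64>());   // different alignment: not ok
}
```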
diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs
index 173b73fdb0924..2dae39a28ab65 100644
--- a/src/libcore/mem.rs
+++ b/src/libcore/mem.rs
@@ -131,6 +131,7 @@ pub fn size_of_val<T>(_val: &T) -> usize {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(reason = "use `align_of` instead", since = "1.1.0")]
 pub fn min_align_of<T>() -> usize {
     unsafe { intrinsics::min_align_of::<T>() }
 }
@@ -147,6 +148,7 @@ pub fn min_align_of<T>() -> usize {
 #[cfg(not(stage0))]
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(reason = "use `align_of_val` instead", since = "1.1.0")]
 pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
     unsafe { intrinsics::min_align_of_val(val) }
 }
@@ -163,44 +165,54 @@ pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
 #[cfg(stage0)]
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(reason = "use `align_of_val` instead", since = "1.1.0")]
 pub fn min_align_of_val<T>(_val: &T) -> usize {
     min_align_of::<T>()
 }
 
-/// Returns the alignment in memory for a type.
+/// Returns the ABI-required minimum alignment of a type
 ///
-/// This function will return the alignment, in bytes, of a type in memory. If the alignment
-/// returned is adhered to, then the type is guaranteed to function properly.
+/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
 ///
 /// # Examples
 ///
 /// ```
 /// use std::mem;
 ///
-/// assert_eq!(4, mem::align_of::<i32>());
+/// assert_eq!(4, mem::min_align_of::<i32>());
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn align_of<T>() -> usize {
-    // We use the preferred alignment as the default alignment for a type. This
-    // appears to be what clang migrated towards as well:
-    //
-    // http://lists.cs.uiuc.edu/pipermail/cfe-commits/Week-of-Mon-20110725/044411.html
-    unsafe { intrinsics::pref_align_of::<T>() }
+    unsafe { intrinsics::min_align_of::<T>() }
 }
 
-/// Returns the alignment of the type of the value that `_val` points to.
+/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
 ///
-/// This is similar to `align_of`, but function will properly handle types such as trait objects
-/// (in the future), returning the alignment for an arbitrary value at runtime.
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::min_align_of_val(&5i32));
+/// ```
+#[cfg(not(stage0))]
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
+    unsafe { intrinsics::min_align_of_val(val) }
+}
+
+/// Returns the ABI-required minimum alignment of the type of the value that `_val` points to
 ///
 /// # Examples
 ///
 /// ```
 /// use std::mem;
 ///
-/// assert_eq!(4, mem::align_of_val(&5i32));
+/// assert_eq!(4, mem::min_align_of_val(&5i32));
 /// ```
+#[cfg(stage0)]
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn align_of_val<T>(_val: &T) -> usize {
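Note (illustration, not part of the patch): the core of this change is in mem.rs above. `align_of` now forwards to the `min_align_of` intrinsic, so it reports the ABI-required minimum alignment (the alignment used for struct fields) instead of the preferred alignment, and the `min_align_of*` functions are deprecated in favour of `align_of*`. A sketch of what callers can rely on after the change; concrete numbers are target-dependent:

```rust
use std::mem;

#[repr(C)]
struct Wrapper {
    a: u8,
    b: f64,
}

fn main() {
    // For a #[repr(C)] struct, the struct's alignment is the largest field
    // alignment, and that field alignment is exactly what align_of reports.
    assert_eq!(mem::align_of::<Wrapper>(), mem::align_of::<f64>());

    // The struct's size is padded out to a multiple of its (minimum) alignment.
    assert_eq!(mem::size_of::<Wrapper>() % mem::align_of::<Wrapper>(), 0);

    // Migration is mechanical: mem::min_align_of::<T>() becomes mem::align_of::<T>().
    assert_eq!(mem::align_of::<i32>(), 4); // matches the doc example above on common targets
}
```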
diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs
index 4841f36c7f747..f262962259d4d 100644
--- a/src/libstd/collections/hash/table.rs
+++ b/src/libstd/collections/hash/table.rs
@@ -15,7 +15,7 @@ use cmp;
 use hash::{Hash, Hasher};
 use iter::{Iterator, ExactSizeIterator};
 use marker::{Copy, Send, Sync, Sized, self};
-use mem::{min_align_of, size_of};
+use mem::{align_of, size_of};
 use mem;
 use num::wrapping::OverflowingOps;
 use ops::{Deref, DerefMut, Drop};
@@ -552,9 +552,9 @@ fn calculate_allocation(hash_size: usize, hash_align: usize,
                                            vals_align);
     let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);
 
-    let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
+    let align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
 
-    (min_align, hash_offset, end_of_vals, oflo || oflo2)
+    (align, hash_offset, end_of_vals, oflo || oflo2)
 }
 
 #[test]
@@ -596,9 +596,9 @@ impl<K, V> RawTable<K, V> {
         // factored out into a different function.
         let (malloc_alignment, hash_offset, size, oflo) =
             calculate_allocation(
-                hashes_size, min_align_of::<u64>(),
-                keys_size,   min_align_of::< K >(),
-                vals_size,   min_align_of::< V >());
+                hashes_size, align_of::<u64>(),
+                keys_size,   align_of::< K >(),
+                vals_size,   align_of::< V >());
 
         assert!(!oflo, "capacity overflow");
 
@@ -629,8 +629,8 @@ impl<K, V> RawTable<K, V> {
         let buffer = *self.hashes as *mut u8;
         let (keys_offset, vals_offset, oflo) =
             calculate_offsets(hashes_size,
-                              keys_size, min_align_of::<K>(),
-                              min_align_of::<V>());
+                              keys_size, align_of::<K>(),
+                              align_of::<V>());
         debug_assert!(!oflo, "capacity overflow");
         unsafe {
             RawBucket {
@@ -1004,9 +1004,9 @@ impl<K, V> Drop for RawTable<K, V> {
         let keys_size = self.capacity * size_of::<K>();
         let vals_size = self.capacity * size_of::<V>();
         let (align, _, size, oflo) =
-            calculate_allocation(hashes_size, min_align_of::<u64>(),
-                                 keys_size, min_align_of::<K>(),
-                                 vals_size, min_align_of::<V>());
+            calculate_allocation(hashes_size, align_of::<u64>(),
+                                 keys_size, align_of::<K>(),
+                                 vals_size, align_of::<V>());
 
         debug_assert!(!oflo, "should be impossible");
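Note (illustration, not part of the patch): `calculate_allocation` above packs the hash, key, and value arrays into one block whose alignment is the maximum of the three component alignments. A simplified sketch of that layout arithmetic (the overflow checks carried by the real code are omitted):

```rust
use std::cmp;
use std::mem::{align_of, size_of};

// Round `off` up to the next multiple of `align` (a power of two).
fn round_up(off: usize, align: usize) -> usize {
    (off + align - 1) & !(align - 1)
}

// Simplified shape of the RawTable allocation: hashes, then keys, then values,
// each array starting at an offset rounded up to its element alignment.
fn table_layout<K, V>(capacity: usize) -> (usize, usize) {
    let hashes_size = capacity * size_of::<u64>();
    let keys_size = capacity * size_of::<K>();
    let vals_size = capacity * size_of::<V>();

    // The single allocation must satisfy the strictest component alignment.
    let align = cmp::max(align_of::<u64>(), cmp::max(align_of::<K>(), align_of::<V>()));

    let keys_offset = round_up(hashes_size, align_of::<K>());
    let vals_offset = round_up(keys_offset + keys_size, align_of::<V>());
    (align, vals_offset + vals_size)
}

fn main() {
    let (align, size) = table_layout::<u32, String>(8);
    println!("align = {}, size = {} bytes", align, size);
}
```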