From 40e3e2b2c23dbf8638a4dc7acd5549393c21d129 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Fri, 25 Nov 2022 17:57:40 +1300
Subject: [PATCH] Refactor object reference to address (#699)

This PR removes most of our uses of `ObjectReference::to_address()`, and replaces them with
`ObjectModel::ref_to_address()`. With this change, the address we get from an object reference is
always guaranteed to be within the allocation. Thus the changes in
https://github.com/mmtk/mmtk-core/pull/555 are reverted in this PR. This PR also addresses the
comments raised in https://github.com/mmtk/mmtk-core/pull/643#discussion_r1009970706.

Changes:
* Replace our uses of `ObjectReference::to_address()` with `ObjectModel::ref_to_address()`.
* Changes to `ObjectReference`:
  * Rename `ObjectReference::to_address()` to `ObjectReference::to_raw_address()`, and make it clear that we should avoid using it.
  * Remove `Address::to_object_reference()`, and add `ObjectReference::from_raw_address()`. Make it clear that we should avoid using it.
* Changes to `ObjectModel`:
  * Add `address_to_ref`, which does the opposite of `ref_to_address`.
  * Add `ref_to_header`.
  * Rename `object_start_ref` to `ref_to_object_start`, to make it consistent with the other methods.
* Change `Treadmill` to store `ObjectReference` instead of `Address`. We previously stored the object start address in `Treadmill` and assumed the alloc bit was set on the object start address. With the changes in this PR, the alloc bit is no longer set on the object start address, and I made changes accordingly.
* Remove the 'object ref guard' code, which was used to deal with the case where an object reference (address) may not be in the same chunk as the allocation. That should no longer happen.
* `alloc_bit::is_alloced_object(address)` now returns an `Option<ObjectReference>`. We may consider returning an `Option` from our API `is_mmtk_object()` as well, but I did not include that change in this PR.
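For a binding, the intended division of labour after this change is: `from_raw_address()`/`to_raw_address()` are raw casts that MMTk makes no assumptions about, while addresses obtained through the `ObjectModel` methods are guaranteed to be inside the allocation and are what MMTk uses for metadata access. The sketch below is not part of the diff and only illustrates how the renamed APIs relate to each other; the `example` function and its `addr` argument are hypothetical, and `VM` stands for a binding's `VMBinding` implementation.

```rust
use mmtk::util::{Address, ObjectReference};
use mmtk::vm::{ObjectModel, VMBinding};

// Hypothetical helper, only to show how the renamed APIs fit together.
fn example<VM: VMBinding>(addr: Address) {
    // Binding side: raw, unchecked casts between Address and ObjectReference.
    let object = ObjectReference::from_raw_address(addr);
    let _raw: Address = object.to_raw_address();

    // MMTk side: addresses derived via the ObjectModel are within the allocation,
    // so they are valid targets for per-object metadata.
    let in_heap: Address = VM::VMObjectModel::ref_to_address(object);
    let _start: Address = VM::VMObjectModel::ref_to_object_start(object);
    let _header: Address = VM::VMObjectModel::ref_to_header(object);

    // Per this PR, address_to_ref() is the inverse of ref_to_address().
    let same: ObjectReference = VM::VMObjectModel::address_to_ref(in_heap);
    debug_assert_eq!(same, object);
}
```

Within mmtk-core itself, `ObjectReference::to_address::<VM>()`, `to_header::<VM>()` and `from_address::<VM>()` are kept as syntactic sugar for the corresponding `ObjectModel` calls.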
--- src/memory_manager.rs | 9 +- src/policy/copyspace.rs | 10 +- src/policy/immix/immixspace.rs | 11 +- src/policy/immix/line.rs | 2 +- src/policy/immortalspace.rs | 9 +- src/policy/largeobjectspace.rs | 40 ++++--- src/policy/lockfreeimmortalspace.rs | 7 +- src/policy/mallocspace/global.rs | 23 ++-- src/policy/mallocspace/metadata.rs | 28 +++-- src/policy/markcompactspace.rs | 20 ++-- src/policy/sft.rs | 19 +-- src/policy/sft_map.rs | 26 ----- src/policy/space.rs | 5 +- src/scheduler/gc_work.rs | 6 +- src/util/address.rs | 68 +++++++++-- src/util/alloc/bumpallocator.rs | 3 +- src/util/alloc/immix_allocator.rs | 7 +- src/util/alloc/large_object_allocator.rs | 3 - src/util/alloc/malloc_allocator.rs | 25 +--- src/util/alloc/mod.rs | 3 - src/util/alloc/object_ref_guard.rs | 65 ----------- src/util/alloc_bit.rs | 92 +++++++++------ src/util/linear_scan.rs | 9 +- src/util/metadata/global.rs | 27 +++-- src/util/metadata/header_metadata.rs | 109 +++++++----------- src/util/object_forwarding.rs | 19 ++- src/util/reference_processor.rs | 2 +- src/util/sanity/memory_scan.rs | 37 ------ src/util/sanity/mod.rs | 1 - src/util/treadmill.rs | 42 ++++--- src/vm/edge_shape.rs | 2 +- src/vm/object_model.rs | 68 +++++++---- src/vm/reference_glue.rs | 5 +- vmbindings/dummyvm/src/api.rs | 2 +- vmbindings/dummyvm/src/edges.rs | 12 +- vmbindings/dummyvm/src/object_model.rs | 17 ++- vmbindings/dummyvm/src/tests/conservatism.rs | 26 ++--- vmbindings/dummyvm/src/tests/edges_test.rs | 26 ++--- vmbindings/dummyvm/src/tests/fixtures/mod.rs | 6 +- .../dummyvm/src/tests/is_in_mmtk_spaces.rs | 14 +-- 40 files changed, 415 insertions(+), 490 deletions(-) delete mode 100644 src/util/alloc/object_ref_guard.rs delete mode 100644 src/util/sanity/memory_scan.rs diff --git a/src/memory_manager.rs b/src/memory_manager.rs index ec14799225..182d0d0322 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -646,10 +646,15 @@ pub fn is_mmtk_object(addr: Address) -> bool { /// /// Arguments: /// * `object`: The object reference to query. -pub fn is_in_mmtk_spaces(object: ObjectReference) -> bool { +pub fn is_in_mmtk_spaces(object: ObjectReference) -> bool { use crate::mmtk::SFT_MAP; use crate::policy::sft_map::SFTMap; - SFT_MAP.get_checked(object.to_address()).is_in_space(object) + if object.is_null() { + return false; + } + SFT_MAP + .get_checked(object.to_address::()) + .is_in_space(object) } /// Is the address in the mapped memory? 
The runtime can use this function to check diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 34368e01f7..5ea8174098 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -47,7 +47,7 @@ impl SFT for CopySpace { fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + crate::util::alloc_bit::set_alloc_bit::(_object); } #[inline(always)] @@ -63,6 +63,12 @@ impl SFT for CopySpace { } } + #[cfg(feature = "is_mmtk_object")] + #[inline(always)] + fn is_mmtk_object(&self, addr: Address) -> bool { + crate::util::alloc_bit::is_alloced_object::(addr).is_some() + } + #[inline(always)] fn sft_trace_object( &self, @@ -231,7 +237,7 @@ impl CopySpace { #[cfg(feature = "global_alloc_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::alloc_bit::is_alloced::(object), "{:x}: alloc bit not set", object ); diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index dd6ba762ac..4e96ff4c1f 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -77,7 +77,12 @@ impl SFT for ImmixSpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + crate::util::alloc_bit::set_alloc_bit::(_object); + } + #[cfg(feature = "is_mmtk_object")] + #[inline(always)] + fn is_mmtk_object(&self, addr: Address) -> bool { + crate::util::alloc_bit::is_alloced_object::(addr).is_some() } #[inline(always)] fn sft_trace_object( @@ -390,7 +395,7 @@ impl ImmixSpace { ) -> ObjectReference { #[cfg(feature = "global_alloc_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::alloc_bit::is_alloced::(object), "{:x}: alloc bit not set", object ); @@ -483,7 +488,7 @@ impl ImmixSpace { object } else { #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_alloc_bit(object); + crate::util::alloc_bit::unset_alloc_bit::(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; debug_assert_eq!( diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs index 1d974921e4..19e9f930b9 100644 --- a/src/policy/immix/line.rs +++ b/src/policy/immix/line.rs @@ -71,7 +71,7 @@ impl Line { #[inline] pub fn mark_lines_for_object(object: ObjectReference, state: u8) -> usize { debug_assert!(!super::BLOCK_ONLY); - let start = VM::VMObjectModel::object_start_ref(object); + let start = VM::VMObjectModel::ref_to_object_start(object); let end = start + VM::VMObjectModel::get_current_size(object); let start_line = Line::from_unaligned_address(start); let mut end_line = Line::from_unaligned_address(end); diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 68d699b295..d6338c52e8 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -70,7 +70,12 @@ impl SFT for ImmortalSpace { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(object); + crate::util::alloc_bit::set_alloc_bit::(object); + } + #[cfg(feature = "is_mmtk_object")] + #[inline(always)] + fn is_mmtk_object(&self, addr: Address) -> bool { + crate::util::alloc_bit::is_alloced_object::(addr).is_some() } #[inline(always)] fn sft_trace_object( @@ -209,7 +214,7 @@ impl ImmortalSpace { ) -> ObjectReference { #[cfg(feature = "global_alloc_bit")] debug_assert!( - 
crate::util::alloc_bit::is_alloced(object), + crate::util::alloc_bit::is_alloced::(object), "{:x}: alloc bit not set", object ); diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index cbb7d9a76a..7b64d89b36 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -73,9 +73,13 @@ impl SFT for LargeObjectSpace { } #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(object); - let cell = VM::VMObjectModel::object_start_ref(object); - self.treadmill.add_to_treadmill(cell, alloc); + crate::util::alloc_bit::set_alloc_bit::(object); + self.treadmill.add_to_treadmill(object, alloc); + } + #[cfg(feature = "is_mmtk_object")] + #[inline(always)] + fn is_mmtk_object(&self, addr: Address) -> bool { + crate::util::alloc_bit::is_alloced_object::(addr).is_some() } #[inline(always)] fn sft_trace_object( @@ -205,7 +209,7 @@ impl LargeObjectSpace { ) -> ObjectReference { #[cfg(feature = "global_alloc_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::alloc_bit::is_alloced::(object), "{:x}: alloc bit not set", object ); @@ -213,8 +217,7 @@ impl LargeObjectSpace { if !self.in_nursery_gc || nursery_object { // Note that test_and_mark() has side effects if self.test_and_mark(object, self.mark_state) { - let cell = VM::VMObjectModel::object_start_ref(object); - self.treadmill.copy(cell, nursery_object); + self.treadmill.copy(object, nursery_object); self.clear_nursery(object); // We just moved the object out of the logical nursery, mark it as unlogged. if nursery_object && self.common.needs_log_bit { @@ -228,22 +231,21 @@ impl LargeObjectSpace { } fn sweep_large_pages(&mut self, sweep_nursery: bool) { - // FIXME: borrow checker fighting - // didn't call self.release_multiple_pages - // so the compiler knows I'm borrowing two different fields + let sweep = |object: ObjectReference| { + #[cfg(feature = "global_alloc_bit")] + crate::util::alloc_bit::unset_alloc_bit::(object); + self.pr + .release_pages(get_super_page(VM::VMObjectModel::ref_to_object_start( + object, + ))); + }; if sweep_nursery { - for cell in self.treadmill.collect_nursery() { - // println!("- cn {}", cell); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_addr_alloc_bit(cell); - self.pr.release_pages(get_super_page(cell)); + for object in self.treadmill.collect_nursery() { + sweep(object); } } else { - for cell in self.treadmill.collect() { - // println!("- ts {}", cell); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_addr_alloc_bit(cell); - self.pr.release_pages(get_super_page(cell)); + for object in self.treadmill.collect() { + sweep(object) } } } diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index c836b32982..a44f1744b1 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -59,7 +59,12 @@ impl SFT for LockFreeImmortalSpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + crate::util::alloc_bit::set_alloc_bit::(_object); + } + #[cfg(feature = "is_mmtk_object")] + #[inline(always)] + fn is_mmtk_object(&self, addr: Address) -> bool { + crate::util::alloc_bit::is_alloced_object::(addr).is_some() } fn sft_trace_object( &self, diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs index c861bf240d..e914fd7c48 100644 --- a/src/policy/mallocspace/global.rs +++ 
b/src/policy/mallocspace/global.rs @@ -74,7 +74,7 @@ impl SFT for MallocSpace { // For malloc space, we need to further check the alloc bit. fn is_in_space(&self, object: ObjectReference) -> bool { - is_alloced_by_malloc(object) + is_alloced_by_malloc::(object) } /// For malloc space, we just use the side metadata. @@ -84,12 +84,12 @@ impl SFT for MallocSpace { debug_assert!(!addr.is_zero()); // `addr` cannot be mapped by us. It should be mapped by the malloc library. debug_assert!(!addr.is_mapped()); - has_object_alloced_by_malloc(addr) + has_object_alloced_by_malloc::(addr).is_some() } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { trace!("initialize_object_metadata for object {}", object); - set_alloc_bit(object); + set_alloc_bit::(object); } #[inline(always)] @@ -131,11 +131,11 @@ impl Space for MallocSpace { // We have assertions in a debug build. We allow this pattern for the release build. #[allow(clippy::let_and_return)] fn in_space(&self, object: ObjectReference) -> bool { - let ret = is_alloced_by_malloc(object); + let ret = is_alloced_by_malloc::(object); #[cfg(debug_assertions)] if ASSERT_ALLOCATION { - let addr = VM::VMObjectModel::object_start_ref(object); + let addr = VM::VMObjectModel::ref_to_object_start(object); let active_mem = self.active_mem.lock().unwrap(); if ret { // The alloc bit tells that the object is in space. @@ -364,15 +364,15 @@ impl MallocSpace { return object; } - let address = object.to_address(); assert!( self.in_space(object), "Cannot mark an object {} that was not alloced by malloc.", - address, + object, ); if !is_marked::(object, Ordering::Relaxed) { - let chunk_start = conversions::chunk_align_down(address); + let chunk_start = + conversions::chunk_align_down(VM::VMObjectModel::ref_to_object_start(object)); set_mark_bit::(object, Ordering::SeqCst); set_chunk_mark(chunk_start); queue.enqueue(object); @@ -440,7 +440,7 @@ impl MallocSpace { /// Given an object in MallocSpace, return its malloc address, whether it is an offset malloc, and malloc size #[inline(always)] fn get_malloc_addr_size(object: ObjectReference) -> (Address, bool, usize) { - let obj_start = VM::VMObjectModel::object_start_ref(object); + let obj_start = VM::VMObjectModel::ref_to_object_start(object); let offset_malloc_bit = is_offset_malloc(obj_start); let bytes = get_malloc_usable_size(obj_start, offset_malloc_bit); (obj_start, offset_malloc_bit, bytes) @@ -470,7 +470,7 @@ impl MallocSpace { // Free object self.free_internal(obj_start, bytes, offset_malloc); trace!("free object {}", object); - unsafe { unset_alloc_bit_unsafe(object) }; + unsafe { unset_alloc_bit_unsafe::(object) }; true } else { @@ -479,7 +479,8 @@ impl MallocSpace { // Unset marks for free pages and update last_object_end if !empty_page_start.is_zero() { // unset marks for pages since last object - let current_page = object.to_address().align_down(BYTES_IN_MALLOC_PAGE); + let current_page = + VM::VMObjectModel::ref_to_object_start(object).align_down(BYTES_IN_MALLOC_PAGE); if current_page > *empty_page_start { // we are the only GC thread that is accessing this chunk unsafe { diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 3c5b5989e2..e2175a8540 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -153,16 +153,21 @@ pub fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize } /// Check if a given object was allocated by malloc -pub fn is_alloced_by_malloc(object: ObjectReference) -> 
bool { - has_object_alloced_by_malloc(object.to_address()) +pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { + is_meta_space_mapped_for_address(object.to_address::()) + && alloc_bit::is_alloced::(object) } /// Check if there is an object allocated by malloc at the address. /// /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. -pub fn has_object_alloced_by_malloc(addr: Address) -> bool { - is_meta_space_mapped_for_address(addr) && alloc_bit::is_alloced_object(addr) +#[cfg(feature = "is_mmtk_object")] +pub fn has_object_alloced_by_malloc(addr: Address) -> Option { + if !is_meta_space_mapped_for_address(addr) { + return None; + } + alloc_bit::is_alloced_object::(addr) } pub fn is_marked(object: ObjectReference, ordering: Ordering) -> bool { @@ -213,8 +218,8 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool { ACTIVE_CHUNK_METADATA_SPEC.load::(chunk_start) == 1 } -pub fn set_alloc_bit(object: ObjectReference) { - alloc_bit::set_alloc_bit(object); +pub fn set_alloc_bit(object: ObjectReference) { + alloc_bit::set_alloc_bit::(object); } pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) { @@ -222,8 +227,8 @@ pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) } #[allow(unused)] -pub fn unset_alloc_bit(object: ObjectReference) { - alloc_bit::unset_alloc_bit(object); +pub fn unset_alloc_bit(object: ObjectReference) { + alloc_bit::unset_alloc_bit::(object); } #[allow(unused)] @@ -235,20 +240,23 @@ pub(super) fn set_chunk_mark(chunk_start: Address) { ACTIVE_CHUNK_METADATA_SPEC.store_atomic::(chunk_start, 1, Ordering::SeqCst); } +/// Is this allocation an offset malloc? The argument address should be the allocation address (object start) pub(super) fn is_offset_malloc(address: Address) -> bool { unsafe { OFFSET_MALLOC_METADATA_SPEC.load::(address) == 1 } } +/// Set the offset bit for the allocation. The argument address should be the allocation address (object start) pub(super) fn set_offset_malloc_bit(address: Address) { OFFSET_MALLOC_METADATA_SPEC.store_atomic::(address, 1, Ordering::SeqCst); } +/// Unset the offset bit for the allocation. 
The argument address should be the allocation address (object start) pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) { OFFSET_MALLOC_METADATA_SPEC.store::(address, 0); } -pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { - alloc_bit::unset_alloc_bit_unsafe(object); +pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { + alloc_bit::unset_alloc_bit_unsafe::(object); } #[allow(unused)] diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index a6f3da1dd7..89a5eb6df1 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -57,7 +57,7 @@ impl SFT for MarkCompactSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - crate::util::alloc_bit::set_alloc_bit(object); + crate::util::alloc_bit::set_alloc_bit::(object); } #[cfg(feature = "sanity")] @@ -65,6 +65,12 @@ impl SFT for MarkCompactSpace { true } + #[cfg(feature = "is_mmtk_object")] + #[inline(always)] + fn is_mmtk_object(&self, addr: Address) -> bool { + crate::util::alloc_bit::is_alloced_object::(addr).is_some() + } + #[inline(always)] fn sft_trace_object( &self, @@ -153,7 +159,7 @@ impl MarkCompactSpace { /// Get the address for header forwarding pointer #[inline(always)] fn header_forwarding_pointer_address(object: ObjectReference) -> Address { - VM::VMObjectModel::object_start_ref(object) - GC_EXTRA_HEADER_BYTES + VM::VMObjectModel::ref_to_object_start(object) - GC_EXTRA_HEADER_BYTES } /// Get header forwarding pointer for an object @@ -231,7 +237,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::alloc_bit::is_alloced::(object), "{:x}: alloc bit not set", object ); @@ -247,7 +253,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::alloc_bit::is_alloced::(object), "{:x}: alloc bit not set", object ); @@ -377,7 +383,7 @@ impl MarkCompactSpace { ); for obj in linear_scan { // clear the alloc bit - alloc_bit::unset_addr_alloc_bit(obj.to_address()); + alloc_bit::unset_alloc_bit::(obj); let forwarding_pointer = Self::get_header_forwarding_pointer(obj); @@ -391,8 +397,8 @@ impl MarkCompactSpace { trace!(" copy from {} to {}", obj, new_object); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update alloc_bit, - alloc_bit::set_alloc_bit(new_object); - to = new_object.to_address() + copied_size; + alloc_bit::set_alloc_bit::(new_object); + to = VM::VMObjectModel::ref_to_object_start(new_object) + copied_size; debug_assert_eq!(end_of_new_object, to); } } diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 7036d34f16..7ad16aa9a3 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -1,8 +1,5 @@ use crate::plan::VectorObjectQueue; use crate::scheduler::GCWorker; -#[cfg(feature = "is_mmtk_object")] -use crate::util::alloc_bit; -use crate::util::conversions; use crate::util::*; use crate::vm::VMBinding; use std::marker::PhantomData; @@ -68,18 +65,7 @@ pub trait SFT { /// Some spaces, like `MallocSpace`, use third-party libraries to allocate memory. /// Such spaces needs to override this method. #[cfg(feature = "is_mmtk_object")] - #[inline(always)] - fn is_mmtk_object(&self, addr: Address) -> bool { - // Having found the SFT means the `addr` is in one of our spaces. 
- // Although the SFT map is allocated eagerly when the space is contiguous, - // the pages of the space itself are acquired on demand. - // Therefore, the page of `addr` may not have been mapped, yet. - if !addr.is_mapped() { - return false; - } - // The `addr` is mapped. We use the global alloc bit to get the exact answer. - alloc_bit::is_alloced_object(addr) - } + fn is_mmtk_object(&self, addr: Address) -> bool; /// Initialize object metadata (in the header, or in the side metadata). fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool); @@ -166,9 +152,8 @@ impl SFT for EmptySpaceSFT { ) -> ObjectReference { // We do not have the `VM` type parameter here, so we cannot forward the call to the VM. panic!( - "Call trace_object() on {} (chunk {}), which maps to an empty space. SFTProcessEdges does not support the fallback to vm_trace_object().", + "Call trace_object() on {}, which maps to an empty space. SFTProcessEdges does not support the fallback to vm_trace_object().", object, - conversions::chunk_align_down(object.to_address()), ) } } diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index db5eddf4f0..2399e4d7ad 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -1,10 +1,6 @@ use super::sft::*; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; -#[cfg(debug_assertions)] -use crate::util::ObjectReference; -#[cfg(debug_assertions)] -use crate::vm::VMBinding; /// SFTMap manages the SFT table, and mapping between addresses with indices in the table. The trait allows /// us to have multiple implementations of the SFT table. @@ -62,28 +58,6 @@ pub trait SFTMap { /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range. /// Otherwise, the caller should check with `has_sft_entry()` before calling this method. unsafe fn clear(&self, address: Address); - - /// Make sure we have valid SFT entries for the object reference. - #[cfg(debug_assertions)] - fn assert_valid_entries_for_object(&self, object: ObjectReference) { - use crate::vm::ObjectModel; - let object_sft = self.get_checked(object.to_address()); - let object_start_sft = self.get_checked(VM::VMObjectModel::object_start_ref(object)); - - debug_assert!( - object_sft.name() != EMPTY_SFT_NAME, - "Object {} has empty SFT", - object - ); - debug_assert_eq!( - object_sft.name(), - object_start_sft.name(), - "Object {} has incorrect SFT entries (object start = {}, object = {}).", - object, - object_start_sft.name(), - object_sft.name() - ); - } } cfg_if::cfg_if! 
{ diff --git a/src/policy/space.rs b/src/policy/space.rs index 5b5cb405a8..d0e503ee4e 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -6,7 +6,7 @@ use crate::util::ObjectReference; use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_BYTES, LOG_BYTES_IN_CHUNK}; use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_END, AVAILABLE_START}; use crate::util::heap::{PageResource, VMRequest}; -use crate::vm::{ActivePlan, Collection, ObjectModel}; +use crate::vm::{ActivePlan, Collection}; use crate::util::constants::LOG_BYTES_IN_MBYTE; use crate::util::conversions; @@ -187,8 +187,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { } fn in_space(&self, object: ObjectReference) -> bool { - let start = VM::VMObjectModel::ref_to_address(object); - self.address_in_space(start) + self.address_in_space(object.to_address::()) } /** diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 582f6368d8..558b5a4b7e 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -522,15 +522,11 @@ impl ProcessEdgesWork for SFTProcessEdges { return object; } - // Make sure we have valid SFT entries for the object. - #[cfg(debug_assertions)] - crate::mmtk::SFT_MAP.assert_valid_entries_for_object::(object); - // Erase type parameter let worker = GCWorkerMutRef::new(self.worker()); // Invoke trace object on sft - let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_address()) }; + let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_address::()) }; sft.sft_trace_object(&mut self.base.nodes, object, worker) } diff --git a/src/util/address.rs b/src/util/address.rs index 21b91c8d63..16369c42ac 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -300,16 +300,6 @@ impl Address { conversions::raw_is_aligned(self.0, align) } - /// converts the Address into an ObjectReference - /// # Safety - /// We would expect ObjectReferences point to valid objects, - /// but an arbitrary Address may not reside an object. This conversion is unsafe, - /// and it is the user's responsibility to ensure the safety. - #[inline(always)] - pub unsafe fn to_object_reference(self) -> ObjectReference { - mem::transmute(self.0) - } - /// converts the Address to a pointer #[inline(always)] pub fn to_ptr(self) -> *const T { @@ -461,10 +451,23 @@ mod tests { } } +use crate::vm::VMBinding; + /// ObjectReference represents address for an object. Compared with Address, /// operations allowed on ObjectReference are very limited. No address arithmetics /// are allowed for ObjectReference. The idea is from the paper /// High-level Low-level Programming (VEE09) and JikesRVM. +/// +/// A runtime may define its "object references" differently. It may define an object reference as +/// the address of an object, a handle that points to an indirection table entry where a pointer to +/// the object is held, or anything else. Regardless, MMTk expects each object reference to have a +/// pointer to the object (an address) in each object reference, and that address should be used +/// for this `ObjectReference` type. +/// +/// We currently do not allow an opaque `ObjectReference` type for which a binding can define +/// their layout. We now only allow a binding to define their semantics through a set of +/// methods in [`crate::vm::ObjectModel`]. Major refactoring is needed in MMTk to allow +/// the opaque `ObjectReference` type, and we haven't seen a use case for now. 
#[repr(transparent)] #[derive(Copy, Clone, Eq, Hash, PartialOrd, PartialEq)] pub struct ObjectReference(usize); @@ -472,12 +475,53 @@ pub struct ObjectReference(usize); impl ObjectReference { pub const NULL: ObjectReference = ObjectReference(0); - /// converts the ObjectReference to an Address + /// Cast the object reference to its raw address. This method is mostly for the convinience of a binding. + /// + /// MMTk should not make any assumption on the actual location of the address with the object reference. + /// MMTk should not assume the address returned by this method is in our allocation. For the purposes of + /// setting object metadata, MMTk should use [`crate::vm::ObjectModel::ref_to_address()`] or [`crate::vm::ObjectModel::ref_to_header()`]. #[inline(always)] - pub fn to_address(self) -> Address { + pub fn to_raw_address(self) -> Address { Address(self.0) } + /// Cast a raw address to an object reference. This method is mostly for the convinience of a binding. + /// This is how a binding creates `ObjectReference` instances. + /// + /// MMTk should not assume an arbitrary address can be turned into an object reference. MMTk can use [`crate::vm::ObjectModel::address_to_ref()`] + /// to turn addresses that are from [`crate::vm::ObjectModel::ref_to_address()`] back to object. + #[inline(always)] + pub fn from_raw_address(addr: Address) -> ObjectReference { + ObjectReference(addr.0) + } + + /// Get the in-heap address from an object reference. This method is used by MMTk to get an in-heap address + /// for an object reference. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_address`]. See the + /// comments on [`crate::vm::ObjectModel::ref_to_address`]. + #[inline(always)] + pub fn to_address(self) -> Address { + use crate::vm::ObjectModel; + VM::VMObjectModel::ref_to_address(self) + } + + /// Get the header base address from an object reference. This method is used by MMTk to get a base address for the + /// object header, and access the object header. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_header`]. + /// See the comments on [`crate::vm::ObjectModel::ref_to_header`]. + #[inline(always)] + pub fn to_header(self) -> Address { + use crate::vm::ObjectModel; + VM::VMObjectModel::ref_to_header(self) + } + + /// Get the object reference from an address that is returned from [`crate::util::address::ObjectReference::to_address`] + /// or [`crate::vm::ObjectModel::ref_to_address`]. This method is syntactic sugar for [`crate::vm::ObjectModel::address_to_ref`]. + /// See the comments on [`crate::vm::ObjectModel::address_to_ref`]. + #[inline(always)] + pub fn from_address(addr: Address) -> ObjectReference { + use crate::vm::ObjectModel; + VM::VMObjectModel::address_to_ref(addr) + } + /// is this object reference null reference? 
#[inline(always)] pub fn is_null(self) -> bool { diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 4d1276331a..93fda5c3b7 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -1,5 +1,4 @@ use super::allocator::{align_allocation_no_fill, fill_alignment_gap}; -use super::object_ref_guard::adjust_thread_local_buffer_limit; use crate::util::Address; use crate::util::alloc::Allocator; @@ -31,7 +30,7 @@ pub struct BumpAllocator { impl BumpAllocator { pub fn set_limit(&mut self, cursor: Address, limit: Address) { self.cursor = cursor; - self.limit = adjust_thread_local_buffer_limit::(limit); + self.limit = limit; } pub fn reset(&mut self) { diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 17e9c00b13..19690c68f3 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -1,5 +1,4 @@ use super::allocator::{align_allocation_no_fill, fill_alignment_gap}; -use super::object_ref_guard::adjust_thread_local_buffer_limit; use crate::plan::Plan; use crate::policy::immix::line::*; use crate::policy::immix::ImmixSpace; @@ -249,7 +248,7 @@ impl ImmixAllocator { { // Find recyclable lines. Update the bump allocation cursor and limit. self.cursor = start_line.start(); - self.limit = adjust_thread_local_buffer_limit::(end_line.start()); + self.limit = end_line.start(); trace!( "{:?}: acquire_recyclable_lines -> {:?} [{:?}, {:?}) {:?}", self.tls, @@ -307,10 +306,10 @@ impl ImmixAllocator { ); if self.request_for_large { self.large_cursor = block.start(); - self.large_limit = adjust_thread_local_buffer_limit::(block.end()); + self.large_limit = block.end(); } else { self.cursor = block.start(); - self.limit = adjust_thread_local_buffer_limit::(block.end()); + self.limit = block.end(); } self.alloc(size, align, offset) } diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index 928a6128ae..bf46fc1fd7 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -35,9 +35,6 @@ impl Allocator for LargeObjectAllocator { } fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address { - #[cfg(debug_assertions)] - crate::util::alloc::object_ref_guard::assert_object_ref_in_cell::(size); - let cell: Address = self.alloc_slow(size, align, offset); // We may get a null ptr from alloc due to the VM being OOM if !cell.is_zero() { diff --git a/src/util/alloc/malloc_allocator.rs b/src/util/alloc/malloc_allocator.rs index 1df55d585a..d5604a475b 100644 --- a/src/util/alloc/malloc_allocator.rs +++ b/src/util/alloc/malloc_allocator.rs @@ -1,6 +1,5 @@ use crate::policy::mallocspace::MallocSpace; use crate::policy::space::Space; -use crate::util::alloc::object_ref_guard::object_ref_may_cross_chunk; use crate::util::alloc::Allocator; use crate::util::opaque_pointer::*; use crate::util::Address; @@ -41,29 +40,7 @@ impl Allocator for MallocAllocator { fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address { assert!(offset >= 0); - let ret = self.space.alloc(self.tls, size, align, offset); - if !object_ref_may_cross_chunk::(ret) { - ret - } else { - // The address we got does not pass object ref checks. We cache it and free it later. - // We free the results in the end to avoid malloc giving us the free'd address again. - // The creation of the vec is put here so for the common case where we succeed in the first allocation, - // we do not need to create this vec. 
- let mut to_free = vec![ret]; - loop { - let ret = self.space.alloc(self.tls, size, align, offset); - if object_ref_may_cross_chunk::(ret) { - // The result does not pass check. Cache it. - to_free.push(ret); - } else { - // The result passes the check. We free all the cached results, and return the new result. - for addr in to_free.iter() { - self.space.free(*addr); - } - return ret; - } - } - } + self.space.alloc(self.tls, size, align, offset) } } diff --git a/src/util/alloc/mod.rs b/src/util/alloc/mod.rs index e9abddc823..149fd2cd99 100644 --- a/src/util/alloc/mod.rs +++ b/src/util/alloc/mod.rs @@ -6,9 +6,6 @@ pub use allocator::fill_alignment_gap; pub use allocator::AllocationError; pub use allocator::Allocator; -/// Functions to ensure an object reference for an allocation has valid metadata. -mod object_ref_guard; - /// A list of all the allocators, embedded in Mutator pub(crate) mod allocators; pub use allocators::AllocatorSelector; diff --git a/src/util/alloc/object_ref_guard.rs b/src/util/alloc/object_ref_guard.rs deleted file mode 100644 index 43ed549929..0000000000 --- a/src/util/alloc/object_ref_guard.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! This module includes functions to make sure the following invariant always holds: for each object we allocate (`[cell, cell + bytes)`), the metadata for -//! the object reference (`object_ref`) is always in the range of the allocated memory. Given that we always initialize metadata based on chunks, -//! we simply need to make sure that `object_ref` is in the same chunk as `[cell, cell + bytes)`. In other words, we avoid -//! allocating an address for which the object reference may be in another chunk. -//! -//! Note that where an ObjectReference points to is defined by a binding. We only have this problem if an object reference may point -//! to an address that is outside our allocated memory (`object_ref >= cell + bytes`). We ask a binding to specify -//! `ObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL` if their object reference may point to an address outside the allocated -//! memory. `ObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL` should be the max of `object_ref - cell`. -//! -//! There are various ways we deal with this. -//! * For allocators that have a thread local buffer, we can adjust the buffer limit to make sure that the last object allocated in the -//! buffer won't cross chunks. -//! * For allocators that allocate large objects, if the object size is larger than `OBJECT_REF_OFFSET_BEYOND_CELL`, it is guaranteed that -//! the object reference is within the allocated memory. -//! * For other allocators, we can check if the allocation result violates this invariant. - -use crate::util::heap::layout::vm_layout_constants::{BYTES_IN_CHUNK, CHUNK_MASK}; -use crate::util::Address; -use crate::vm::ObjectModel; -use crate::vm::VMBinding; - -/// Adjust limit for thread local buffer to make sure that we will not allocate objects whose object reference may -/// be in another chunk. -pub fn adjust_thread_local_buffer_limit(limit: Address) -> Address { - // We only need to adjust limit when the binding tells us that - // object ref may point outside the allocated memory and when limit is at chunk boundary - if let Some(offset) = VM::VMObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL { - if limit.is_aligned_to(BYTES_IN_CHUNK) { - debug_assert!(limit.as_usize() > offset); - // We simply not use the last few bytes. This is a rare case anyway (expect less than 1% of slowpath allocation goes here). 
- // It should be possible for us to check if we can use the last few bytes to finish an allocation request when we 'exhaust' - // thread local buffer. But probably it won't give us much benefit and it complicates our allocation code. - return limit - offset; - } - } - - limit -} - -/// Assert that the object reference should always inside the allocation cell -#[cfg(debug_assertions)] -pub fn assert_object_ref_in_cell(size: usize) { - if VM::VMObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL.is_none() { - return; - } - - // If the object ref offset is smaller than size, it is always inside the allocation cell. - debug_assert!( - size > VM::VMObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL.unwrap(), - "Allocating objects of size {} may cross chunk (OBJECT_REF_OFFSET_BEYOND_CELL = {})", - size, - VM::VMObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL.unwrap() - ); -} - -/// Check if the object reference for this allocation may cross and fall into the next chunk. -pub fn object_ref_may_cross_chunk(addr: Address) -> bool { - if VM::VMObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL.is_none() { - return false; - } - - (addr & CHUNK_MASK) + VM::VMObjectModel::OBJECT_REF_OFFSET_BEYOND_CELL.unwrap() - >= BYTES_IN_CHUNK -} diff --git a/src/util/alloc_bit.rs b/src/util/alloc_bit.rs index e9bb9f93ca..7702620b10 100644 --- a/src/util/alloc_bit.rs +++ b/src/util/alloc_bit.rs @@ -1,10 +1,9 @@ use atomic::Ordering; -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; -use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; use crate::util::ObjectReference; +use crate::vm::VMBinding; /// An alloc-bit is required per min-object-size aligned address , rather than per object, and can only exist as side metadata. pub(crate) const ALLOC_SIDE_METADATA_SPEC: SideMetadataSpec = @@ -12,59 +11,82 @@ pub(crate) const ALLOC_SIDE_METADATA_SPEC: SideMetadataSpec = pub const ALLOC_SIDE_METADATA_ADDR: Address = ALLOC_SIDE_METADATA_SPEC.get_absolute_offset(); -pub fn map_meta_space_for_chunk(metadata: &SideMetadataContext, chunk_start: Address) { - let mmap_metadata_result = metadata.try_map_metadata_space(chunk_start, BYTES_IN_CHUNK); +/// Atomically set the alloc bit for an object. +pub fn set_alloc_bit(object: ObjectReference) { debug_assert!( - mmap_metadata_result.is_ok(), - "mmap sidemetadata failed for chunk_start ({})", - chunk_start + !is_alloced::(object), + "{:x}: alloc bit already set", + object ); + ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address::(), 1, Ordering::SeqCst); } -pub fn set_alloc_bit(object: ObjectReference) { - debug_assert!(!is_alloced(object), "{:x}: alloc bit already set", object); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); -} - -pub fn unset_addr_alloc_bit(address: Address) { - debug_assert!( - is_alloced_object(address), - "{:x}: alloc bit not set", - address - ); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(address, 0, Ordering::SeqCst); -} - -pub fn unset_alloc_bit(object: ObjectReference) { - debug_assert!(is_alloced(object), "{:x}: alloc bit not set", object); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); +/// Atomically unset the alloc bit for an object. 
+pub fn unset_alloc_bit(object: ObjectReference) { + debug_assert!(is_alloced::(object), "{:x}: alloc bit not set", object); + ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address::(), 0, Ordering::SeqCst); } +/// Non-atomically unset the alloc bit for an object. The caller needs to ensure the side +/// metadata for the alloc bit for the object is accessed by only one thread. +/// /// # Safety /// /// This is unsafe: check the comment on `side_metadata::store` -/// -pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { - debug_assert!(is_alloced(object), "{:x}: alloc bit not set", object); - ALLOC_SIDE_METADATA_SPEC.store::(object.to_address(), 0); +pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { + debug_assert!(is_alloced::(object), "{:x}: alloc bit not set", object); + ALLOC_SIDE_METADATA_SPEC.store::(object.to_address::(), 0); } -pub fn is_alloced(object: ObjectReference) -> bool { - is_alloced_object(object.to_address()) +/// Check if the alloc bit is set for an object. +pub fn is_alloced(object: ObjectReference) -> bool { + ALLOC_SIDE_METADATA_SPEC.load_atomic::(object.to_address::(), Ordering::SeqCst) == 1 } -pub fn is_alloced_object(address: Address) -> bool { - ALLOC_SIDE_METADATA_SPEC.load_atomic::(address, Ordering::SeqCst) == 1 +/// Check if an address can be turned directly into an object reference using the alloc bit. +/// If so, return `Some(object)`. Otherwise return `None`. +#[inline] +pub fn is_alloced_object(address: Address) -> Option { + let potential_object = ObjectReference::from_raw_address(address); + let addr = potential_object.to_address::(); + + // If we haven't mapped alloc bit for the address, it cannot be an object + if !ALLOC_SIDE_METADATA_SPEC.is_mapped(addr) { + return None; + } + + if ALLOC_SIDE_METADATA_SPEC.load_atomic::(addr, Ordering::SeqCst) == 1 { + Some(potential_object) + } else { + None + } } +/// Check if an address can be turned directly into an object reference using the alloc bit. +/// If so, return `Some(object)`. Otherwise return `None`. The caller needs to ensure the side +/// metadata for the alloc bit for the object is accessed by only one thread. +/// /// # Safety /// /// This is unsafe: check the comment on `side_metadata::load` -/// -pub unsafe fn is_alloced_object_unsafe(address: Address) -> bool { - ALLOC_SIDE_METADATA_SPEC.load::(address) == 1 +#[inline] +pub unsafe fn is_alloced_object_unsafe(address: Address) -> Option { + let potential_object = ObjectReference::from_raw_address(address); + let addr = potential_object.to_address::(); + + // If we haven't mapped alloc bit for the address, it cannot be an object + if !ALLOC_SIDE_METADATA_SPEC.is_mapped(addr) { + return None; + } + + if ALLOC_SIDE_METADATA_SPEC.load::(addr) == 1 { + Some(potential_object) + } else { + None + } } +/// Bulk zero the alloc bit. 
pub fn bzero_alloc_bit(start: Address, size: usize) { ALLOC_SIDE_METADATA_SPEC.bzero_metadata(start, size); } diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index 8d97f3e635..7df40ce7dd 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -41,13 +41,12 @@ impl fn next(&mut self) -> Option<::Item> { while self.cursor < self.end { let is_object = if ATOMIC_LOAD_ALLOC_BIT { - alloc_bit::is_alloced_object(self.cursor) + alloc_bit::is_alloced_object::(self.cursor) } else { - unsafe { alloc_bit::is_alloced_object_unsafe(self.cursor) } + unsafe { alloc_bit::is_alloced_object_unsafe::(self.cursor) } }; - if is_object { - let object = unsafe { self.cursor.to_object_reference() }; + if let Some(object) = is_object { self.cursor += S::size(object); return Some(object); } else { @@ -122,7 +121,7 @@ pub trait Region: Copy + PartialEq + PartialOrd { /// Return the region that contains the object (by its cell address). #[inline(always)] fn containing(object: ObjectReference) -> Self { - Self::from_unaligned_address(VM::VMObjectModel::ref_to_address(object)) + Self::from_unaligned_address(object.to_address::()) } } diff --git a/src/util/metadata/global.rs b/src/util/metadata/global.rs index 8cc2a2b49f..094f1ff332 100644 --- a/src/util/metadata/global.rs +++ b/src/util/metadata/global.rs @@ -48,7 +48,7 @@ impl MetadataSpec { mask: Option, ) -> T { match self { - MetadataSpec::OnSide(metadata_spec) => metadata_spec.load(object.to_address()), + MetadataSpec::OnSide(metadata_spec) => metadata_spec.load(object.to_address::()), MetadataSpec::InHeader(metadata_spec) => { VM::VMObjectModel::load_metadata::(metadata_spec, object, mask) } @@ -73,7 +73,7 @@ impl MetadataSpec { ) -> T { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.load_atomic(object.to_address(), ordering) + metadata_spec.load_atomic(object.to_address::(), ordering) } MetadataSpec::InHeader(metadata_spec) => { VM::VMObjectModel::load_metadata_atomic::(metadata_spec, object, mask, ordering) @@ -100,7 +100,7 @@ impl MetadataSpec { ) { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.store(object.to_address(), val); + metadata_spec.store(object.to_address::(), val); } MetadataSpec::InHeader(metadata_spec) => { VM::VMObjectModel::store_metadata::(metadata_spec, object, val, mask) @@ -126,7 +126,7 @@ impl MetadataSpec { ) { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.store_atomic(object.to_address(), val, ordering); + metadata_spec.store_atomic(object.to_address::(), val, ordering); } MetadataSpec::InHeader(metadata_spec) => VM::VMObjectModel::store_metadata_atomic::( metadata_spec, @@ -162,7 +162,7 @@ impl MetadataSpec { ) -> std::result::Result { match self { MetadataSpec::OnSide(metadata_spec) => metadata_spec.compare_exchange_atomic( - object.to_address(), + object.to_address::(), old_val, new_val, success_order, @@ -200,7 +200,7 @@ impl MetadataSpec { ) -> T { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.fetch_add_atomic(object.to_address(), val, order) + metadata_spec.fetch_add_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { VM::VMObjectModel::fetch_add_metadata::(metadata_spec, object, val, order) @@ -226,7 +226,7 @@ impl MetadataSpec { ) -> T { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.fetch_sub_atomic(object.to_address(), val, order) + metadata_spec.fetch_sub_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { 
VM::VMObjectModel::fetch_sub_metadata::(metadata_spec, object, val, order) @@ -252,7 +252,7 @@ impl MetadataSpec { ) -> T { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.fetch_and_atomic(object.to_address(), val, order) + metadata_spec.fetch_and_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { VM::VMObjectModel::fetch_and_metadata::(metadata_spec, object, val, order) @@ -278,7 +278,7 @@ impl MetadataSpec { ) -> T { match self { MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.fetch_or_atomic(object.to_address(), val, order) + metadata_spec.fetch_or_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { VM::VMObjectModel::fetch_or_metadata::(metadata_spec, object, val, order) @@ -309,9 +309,12 @@ impl MetadataSpec { f: F, ) -> std::result::Result { match self { - MetadataSpec::OnSide(metadata_spec) => { - metadata_spec.fetch_update_atomic(object.to_address(), set_order, fetch_order, f) - } + MetadataSpec::OnSide(metadata_spec) => metadata_spec.fetch_update_atomic( + object.to_address::(), + set_order, + fetch_order, + f, + ), MetadataSpec::InHeader(metadata_spec) => VM::VMObjectModel::fetch_update_metadata( metadata_spec, object, diff --git a/src/util/metadata/header_metadata.rs b/src/util/metadata/header_metadata.rs index e73aed019b..beae3771b1 100644 --- a/src/util/metadata/header_metadata.rs +++ b/src/util/metadata/header_metadata.rs @@ -7,7 +7,6 @@ use std::sync::atomic::AtomicU8; use crate::util::constants::{BITS_IN_BYTE, LOG_BITS_IN_BYTE}; use crate::util::metadata::metadata_val_traits::*; use crate::util::Address; -use crate::util::ObjectReference; use num_traits::FromPrimitive; const LOG_BITS_IN_U16: usize = 4; @@ -69,8 +68,8 @@ impl HeaderMetadataSpec { } #[inline(always)] - fn meta_addr(&self, object: ObjectReference) -> Address { - object.to_address() + self.byte_offset() + fn meta_addr(&self, header: Address) -> Address { + header + self.byte_offset() } // Some common methods for header metadata that is smaller than 1 byte. @@ -120,29 +119,25 @@ impl HeaderMetadataSpec { /// # Safety /// This is a non-atomic load, thus not thread-safe. #[inline(always)] - pub unsafe fn load( - &self, - object: ObjectReference, - optional_mask: Option, - ) -> T { - self.load_inner::(object, optional_mask, None) + pub unsafe fn load(&self, header: Address, optional_mask: Option) -> T { + self.load_inner::(header, optional_mask, None) } /// This function provides a default implementation for the `load_metadata_atomic` method from the `ObjectModel` trait. 
#[inline(always)] pub fn load_atomic( &self, - object: ObjectReference, + header: Address, optional_mask: Option, ordering: Ordering, ) -> T { - self.load_inner::(object, optional_mask, Some(ordering)) + self.load_inner::(header, optional_mask, Some(ordering)) } #[inline(always)] fn load_inner( &self, - object: ObjectReference, + header: Address, optional_mask: Option, atomic_ordering: Option, ) -> T { @@ -156,9 +151,9 @@ impl HeaderMetadataSpec { let res: T = if self.num_of_bits < 8 { let byte_val = unsafe { if let Some(order) = atomic_ordering { - (self.meta_addr(object)).atomic_load::(order) + (self.meta_addr(header)).atomic_load::(order) } else { - (self.meta_addr(object)).load::() + (self.meta_addr(header)).load::() } }; @@ -166,9 +161,9 @@ impl HeaderMetadataSpec { } else { unsafe { if let Some(order) = atomic_ordering { - T::load_atomic(self.meta_addr(object), order) + T::load_atomic(self.meta_addr(header), order) } else { - (self.meta_addr(object)).load::() + (self.meta_addr(header)).load::() } } }; @@ -189,11 +184,11 @@ impl HeaderMetadataSpec { #[inline(always)] pub unsafe fn store( &self, - object: ObjectReference, + header: Address, val: T, optional_mask: Option, ) { - self.store_inner::(object, val, optional_mask, None) + self.store_inner::(header, val, optional_mask, None) } /// This function provides a default implementation for the `store_metadata_atomic` method from the `ObjectModel` trait. @@ -202,18 +197,18 @@ impl HeaderMetadataSpec { #[inline(always)] pub fn store_atomic( &self, - object: ObjectReference, + header: Address, val: T, optional_mask: Option, ordering: Ordering, ) { - self.store_inner::(object, val, optional_mask, Some(ordering)) + self.store_inner::(header, val, optional_mask, Some(ordering)) } #[inline(always)] fn store_inner( &self, - object: ObjectReference, + header: Address, val: T, optional_mask: Option, atomic_ordering: Option, @@ -227,7 +222,7 @@ impl HeaderMetadataSpec { // metadata smaller than 8-bits is special in that more than one metadata value may be included in one AtomicU8 operation, and extra shift and mask, and compare_exchange is required if self.num_of_bits < 8 { let val_u8 = val.to_u8().unwrap(); - let byte_addr = self.meta_addr(object); + let byte_addr = self.meta_addr(header); if let Some(order) = atomic_ordering { let _ = unsafe { ::fetch_update(byte_addr, order, order, |old_val: u8| { @@ -242,7 +237,7 @@ impl HeaderMetadataSpec { } } } else { - let addr = self.meta_addr(object); + let addr = self.meta_addr(header); unsafe { if let Some(order) = atomic_ordering { // if the optional mask is provided (e.g. 
for forwarding pointer), we need to use compare_exchange @@ -272,7 +267,7 @@ impl HeaderMetadataSpec { #[inline(always)] pub fn compare_exchange( &self, - object: ObjectReference, + header: Address, old_metadata: T, new_metadata: T, optional_mask: Option, @@ -283,7 +278,7 @@ impl HeaderMetadataSpec { self.assert_spec::(); // metadata smaller than 8-bits is special in that more than one metadata value may be included in one AtomicU8 operation, and extra shift and mask is required if self.num_of_bits < 8 { - let byte_addr = self.meta_addr(object); + let byte_addr = self.meta_addr(header); unsafe { let real_old_byte = byte_addr.atomic_load::(success_order); let expected_old_byte = @@ -301,7 +296,7 @@ impl HeaderMetadataSpec { .map_err(|x| FromPrimitive::from_u8(x).unwrap()) } } else { - let addr = self.meta_addr(object); + let addr = self.meta_addr(header); let (old_metadata, new_metadata) = if let Some(mask) = optional_mask { let old_byte = unsafe { T::load_atomic(addr, success_order) }; let expected_new_byte = old_byte.bitand(mask.inv()).bitor(new_metadata); @@ -328,12 +323,12 @@ impl HeaderMetadataSpec { #[inline(always)] fn fetch_ops_on_bits u8>( &self, - object: ObjectReference, + header: Address, set_order: Ordering, fetch_order: Ordering, update: F, ) -> u8 { - let byte_addr = self.meta_addr(object); + let byte_addr = self.meta_addr(header); let old_raw_byte = unsafe { ::fetch_update( byte_addr, @@ -353,52 +348,37 @@ impl HeaderMetadataSpec { /// This function provides a default implementation for the `fetch_add` method from the `ObjectModel` trait. #[inline(always)] - pub fn fetch_add( - &self, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { + pub fn fetch_add(&self, header: Address, val: T, order: Ordering) -> T { #[cfg(debug_assertions)] self.assert_spec::(); if self.num_of_bits < 8 { - FromPrimitive::from_u8(self.fetch_ops_on_bits(object, order, order, |x: u8| { + FromPrimitive::from_u8(self.fetch_ops_on_bits(header, order, order, |x: u8| { x.wrapping_add(val.to_u8().unwrap()) })) .unwrap() } else { - unsafe { T::fetch_add(self.meta_addr(object), val, order) } + unsafe { T::fetch_add(self.meta_addr(header), val, order) } } } /// This function provides a default implementation for the `fetch_sub` method from the `ObjectModel` trait. #[inline(always)] - pub fn fetch_sub( - &self, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { + pub fn fetch_sub(&self, header: Address, val: T, order: Ordering) -> T { #[cfg(debug_assertions)] self.assert_spec::(); if self.num_of_bits < 8 { - FromPrimitive::from_u8(self.fetch_ops_on_bits(object, order, order, |x: u8| { + FromPrimitive::from_u8(self.fetch_ops_on_bits(header, order, order, |x: u8| { x.wrapping_sub(val.to_u8().unwrap()) })) .unwrap() } else { - unsafe { T::fetch_sub(self.meta_addr(object), val, order) } + unsafe { T::fetch_sub(self.meta_addr(header), val, order) } } } /// This function provides a default implementation for the `fetch_and` method from the `ObjectModel` trait. 
#[inline(always)] - pub fn fetch_and( - &self, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { + pub fn fetch_and(&self, header: Address, val: T, order: Ordering) -> T { #[cfg(debug_assertions)] self.assert_spec::(); if self.num_of_bits < 8 { @@ -406,22 +386,17 @@ impl HeaderMetadataSpec { let new_val = (val.to_u8().unwrap() << lshift) | !mask; // We do not need to use fetch_ops_on_bits(), we can just set irrelavent bits to 1, and do fetch_and let old_raw_byte = - unsafe { ::fetch_and(self.meta_addr(object), new_val, order) }; + unsafe { ::fetch_and(self.meta_addr(header), new_val, order) }; let old_val = self.get_bits_from_u8(old_raw_byte); FromPrimitive::from_u8(old_val).unwrap() } else { - unsafe { T::fetch_and(self.meta_addr(object), val, order) } + unsafe { T::fetch_and(self.meta_addr(header), val, order) } } } /// This function provides a default implementation for the `fetch_or` method from the `ObjectModel` trait. #[inline(always)] - pub fn fetch_or( - &self, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { + pub fn fetch_or(&self, header: Address, val: T, order: Ordering) -> T { #[cfg(debug_assertions)] self.assert_spec::(); if self.num_of_bits < 8 { @@ -429,28 +404,28 @@ impl HeaderMetadataSpec { let new_val = (val.to_u8().unwrap() << lshift) & mask; // We do not need to use fetch_ops_on_bits(), we can just set irrelavent bits to 0, and do fetch_or let old_raw_byte = - unsafe { ::fetch_or(self.meta_addr(object), new_val, order) }; + unsafe { ::fetch_or(self.meta_addr(header), new_val, order) }; let old_val = self.get_bits_from_u8(old_raw_byte); FromPrimitive::from_u8(old_val).unwrap() } else { - unsafe { T::fetch_or(self.meta_addr(object), val, order) } + unsafe { T::fetch_or(self.meta_addr(header), val, order) } } } /// This function provides a default implementation for the `fetch_update` method from the `ObjectModel` trait. /// The semantics is the same as Rust's `fetch_update` on atomic types. #[inline(always)] - pub fn fetch_update( + pub fn fetch_update Option + Copy>( &self, - object: ObjectReference, + header: Address, set_order: Ordering, fetch_order: Ordering, - mut f: impl FnMut(T) -> Option + Copy, + mut f: F, ) -> std::result::Result { #[cfg(debug_assertions)] self.assert_spec::(); if self.num_of_bits < 8 { - let byte_addr = self.meta_addr(object); + let byte_addr = self.meta_addr(header); unsafe { ::fetch_update( byte_addr, @@ -468,7 +443,7 @@ impl HeaderMetadataSpec { .map(|raw_byte| FromPrimitive::from_u8(self.get_bits_from_u8(raw_byte)).unwrap()) .map_err(|raw_byte| FromPrimitive::from_u8(self.get_bits_from_u8(raw_byte)).unwrap()) } else { - unsafe { T::fetch_update(self.meta_addr(object), set_order, fetch_order, f) } + unsafe { T::fetch_update(self.meta_addr(header), set_order, fetch_order, f) } } } } @@ -656,7 +631,7 @@ mod tests { macro_rules! 
impl_with_object { ($type: ty) => { paste!{ - fn [](f: F) where F: FnOnce(ObjectReference, *mut $type) + std::panic::UnwindSafe { + fn [](f: F) where F: FnOnce(Address, *mut $type) + std::panic::UnwindSafe { // Allocate a tuple that can hold 3 integers let ty_size = ($type::BITS >> LOG_BITS_IN_BYTE) as usize; let layout = std::alloc::Layout::from_size_align(ty_size * 3, ty_size).unwrap(); @@ -668,7 +643,7 @@ mod tests { assert_eq!(unsafe { *(ptr_mid.offset(-1)) }, 0, "memory at offset -1 is not zero"); assert_eq!(unsafe { *ptr_mid }, 0, "memory at offset 0 is not zero"); assert_eq!(unsafe { *(ptr_mid.offset(1)) }, 0, "memory at offset 1 is not zero"); - (unsafe { Address::from_ptr(ptr_mid).to_object_reference() }, ptr_mid) + (Address::from_ptr(ptr_mid), ptr_mid) }; crate::util::test_util::with_cleanup( || f(obj, ptr), diff --git a/src/util/object_forwarding.rs index 8b83830554..672dcaa98f 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -1,7 +1,7 @@ use crate::util::copy::*; use crate::util::metadata::MetadataSpec; /// https://github.com/JikesRVM/JikesRVM/blob/master/MMTk/src/org/mmtk/utility/ForwardingWord.java -use crate::util::{constants, Address, ObjectReference}; +use crate::util::{constants, ObjectReference}; use crate::vm::ObjectModel; use crate::vm::VMBinding; use std::sync::atomic::Ordering; @@ -82,11 +82,11 @@ pub fn forward_object( ) -> ObjectReference { let new_object = VM::VMObjectModel::copy(object, semantics, copy_context); #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(new_object); + crate::util::alloc_bit::set_alloc_bit::(new_object); if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::() { VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, - new_object.to_address().as_usize() | ((FORWARDED as usize) << shift), + new_object.to_raw_address().as_usize() | ((FORWARDED as usize) << shift), None, Ordering::SeqCst, ) @@ -99,11 +99,6 @@ pub fn forward_object( Ordering::SeqCst, ); } - #[cfg(debug_assertions)] - { - use crate::policy::sft_map::SFTMap; - crate::mmtk::SFT_MAP.assert_valid_entries_for_object::(new_object); - } new_object } @@ -157,15 +152,15 @@ pub fn read_forwarding_pointer(object: ObjectReference) -> Object object, ); + // We write the forwarding pointer. We know it is an object reference.
unsafe { - Address::from_usize( + ObjectReference::from_raw_address(crate::util::Address::from_usize( VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.load_atomic::( object, Some(FORWARDING_POINTER_MASK), Ordering::SeqCst, ), - ) - .to_object_reference() + )) } } @@ -185,7 +180,7 @@ pub fn write_forwarding_pointer( trace!("GCForwardingWord::write({:#?}, {:x})\n", object, new_object); VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, - new_object.to_address().as_usize(), + new_object.to_raw_address().as_usize(), Some(FORWARDING_POINTER_MASK), Ordering::SeqCst, ) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index cc8b2ae8d4..1886f83fd0 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -405,7 +405,7 @@ impl ReferenceProcessor { if !::VMReferenceGlue::is_referent_cleared(referent) { Self::keep_referent_alive(trace, referent); } - trace!(" ~> {:?} (retained)", referent.to_address()); + trace!(" ~> {:?} (retained)", referent); } debug!("Ending ReferenceProcessor.retain({:?})", self.semantics); diff --git a/src/util/sanity/memory_scan.rs b/src/util/sanity/memory_scan.rs deleted file mode 100644 index acb7375744..0000000000 --- a/src/util/sanity/memory_scan.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::util::Address; -use crate::util::ObjectReference; - -// This is legacy code, and no one is using this. Using gdb can achieve the same thing for debugging. -// The JikesRVM binding still declares this method and we need to remove it from JikesRVM. -#[deprecated] -pub fn scan_region() { - loop { - let mut buf = String::new(); - println!("start end "); - let bytes = std::io::stdin().read_line(&mut buf).unwrap(); - let mut iter = buf.split_whitespace(); - let start = iter.next(); - let end = iter.next(); - let value = iter.next(); - if start.is_none() || bytes == 0 { - break; - } - let mut start = usize::from_str_radix(&start.unwrap()[2..], 16).unwrap(); - let end = usize::from_str_radix(&end.unwrap()[2..], 16).unwrap(); - - while start < end { - let slot = unsafe { Address::from_usize(start) }; - let object: ObjectReference = unsafe { slot.load() }; - if let Some(value) = value { - let value = usize::from_str_radix(&value[2..], 16).unwrap(); - if object.to_address() == unsafe { Address::from_usize(value) } { - println!("{} REF: {}", slot, object); - } - } else if !object.is_sane() { - println!("{} REF: {}", slot, object); - } - // FIXME steveb Consider VM-specific integrity check on reference. 
- start += std::mem::size_of::(); - } - } -} diff --git a/src/util/sanity/mod.rs b/src/util/sanity/mod.rs index 8c8f15b53c..d45a1a9fa1 100644 --- a/src/util/sanity/mod.rs +++ b/src/util/sanity/mod.rs @@ -1,2 +1 @@ -pub mod memory_scan; pub mod sanity_checker; diff --git a/src/util/treadmill.rs b/src/util/treadmill.rs index aa7ce94919..391256a432 100644 --- a/src/util/treadmill.rs +++ b/src/util/treadmill.rs @@ -2,13 +2,13 @@ use std::collections::HashSet; use std::mem::swap; use std::sync::Mutex; -use crate::util::Address; +use crate::util::ObjectReference; pub struct TreadMill { - from_space: Mutex>, - to_space: Mutex>, - collect_nursery: Mutex>, - alloc_nursery: Mutex>, + from_space: Mutex>, + to_space: Mutex>, + collect_nursery: Mutex>, + alloc_nursery: Mutex>, } impl std::fmt::Debug for TreadMill { @@ -32,17 +32,17 @@ impl TreadMill { } } - pub fn add_to_treadmill(&self, cell: Address, nursery: bool) { + pub fn add_to_treadmill(&self, object: ObjectReference, nursery: bool) { if nursery { // println!("+ an {}", cell); - self.alloc_nursery.lock().unwrap().insert(cell); + self.alloc_nursery.lock().unwrap().insert(object); } else { // println!("+ ts {}", cell); - self.to_space.lock().unwrap().insert(cell); + self.to_space.lock().unwrap().insert(object); } } - pub fn collect_nursery(&self) -> Vec
{ + pub fn collect_nursery(&self) -> Vec { let mut guard = self.collect_nursery.lock().unwrap(); let vals = guard.iter().copied().collect(); guard.clear(); @@ -50,7 +50,7 @@ impl TreadMill { vals } - pub fn collect(&self) -> Vec
{ + pub fn collect(&self) -> Vec { let mut guard = self.from_space.lock().unwrap(); let vals = guard.iter().copied().collect(); guard.clear(); @@ -58,27 +58,25 @@ impl TreadMill { vals } - pub fn copy(&self, cell: Address, is_in_nursery: bool) { + pub fn copy(&self, object: ObjectReference, is_in_nursery: bool) { if is_in_nursery { let mut guard = self.collect_nursery.lock().unwrap(); debug_assert!( - guard.contains(&cell), - "copy source cell ({}) must be in collect_nursery", - cell + guard.contains(&object), + "copy source object ({}) must be in collect_nursery", + object ); - guard.remove(&cell); - // println!("cn -> ts {}", cell); + guard.remove(&object); } else { let mut guard = self.from_space.lock().unwrap(); debug_assert!( - guard.contains(&cell), - "copy source cell ({}) must be in from_space", - cell + guard.contains(&object), + "copy source object ({}) must be in from_space", + object ); - guard.remove(&cell); - // println!("fs -> ts {}", cell); + guard.remove(&object); } - self.to_space.lock().unwrap().insert(cell); + self.to_space.lock().unwrap().insert(object); } pub fn is_to_space_empty(&self) -> bool { diff --git a/src/vm/edge_shape.rs b/src/vm/edge_shape.rs index 629a5467e6..7ebd4c0bed 100644 --- a/src/vm/edge_shape.rs +++ b/src/vm/edge_shape.rs @@ -24,7 +24,7 @@ use crate::util::{Address, ObjectReference}; /// or some arbitrary offset) for some reasons. /// /// When loading, `Edge::load` shall decode its internal representation to a "regular" -/// `ObjectReference` which is applicable to `ObjectModel::object_start_ref`. The implementation +/// `ObjectReference`. The implementation /// can do this with any appropriate operations, usually shifting and masking bits or subtracting /// offset from the address. By doing this conversion, MMTk can implement GC algorithms in a /// VM-neutral way, knowing only `ObjectReference`. diff --git a/src/vm/object_model.rs b/src/vm/object_model.rs index 835c0e471a..a7f535d24d 100644 --- a/src/vm/object_model.rs +++ b/src/vm/object_model.rs @@ -102,7 +102,7 @@ pub trait ObjectModel { object: ObjectReference, mask: Option, ) -> T { - metadata_spec.load::(object, mask) + metadata_spec.load::(object.to_header::(), mask) } /// A function to atomically load the specified per-object metadata's content. @@ -122,7 +122,7 @@ pub trait ObjectModel { mask: Option, ordering: Ordering, ) -> T { - metadata_spec.load_atomic::(object, mask, ordering) + metadata_spec.load_atomic::(object.to_header::(), mask, ordering) } /// A function to non-atomically store a value to the specified per-object metadata. @@ -144,7 +144,7 @@ pub trait ObjectModel { val: T, mask: Option, ) { - metadata_spec.store::(object, val, mask) + metadata_spec.store::(object.to_header::(), val, mask) } /// A function to atomically store a value to the specified per-object metadata. @@ -165,7 +165,7 @@ pub trait ObjectModel { mask: Option, ordering: Ordering, ) { - metadata_spec.store_atomic::(object, val, mask, ordering) + metadata_spec.store_atomic::(object.to_header::(), val, mask, ordering) } /// A function to atomically compare-and-exchange the specified per-object metadata's content. 
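Note: the metadata accessors in the hunks around here now take the header base address: the `ObjectModel` defaults resolve `ObjectReference::to_header` once and hand a plain `Address` down to `HeaderMetadataSpec`. A minimal sketch of calling the new signatures directly, assuming an illustrative one-byte in-header spec (`STATUS_SPEC` and `mark` are made-up names, not part of this change; field names follow mmtk-core's `HeaderMetadataSpec`):

    use std::sync::atomic::Ordering;
    use mmtk::util::metadata::header_metadata::HeaderMetadataSpec;
    use mmtk::util::Address;

    // Illustrative in-header spec: a full byte located at the header base address.
    const STATUS_SPEC: HeaderMetadataSpec = HeaderMetadataSpec {
        bit_offset: 0,
        num_of_bits: 8,
    };

    // With this change, the spec methods take the header base address (an Address)
    // rather than an ObjectReference; callers such as the ObjectModel defaults
    // obtain that address via ObjectReference::to_header.
    fn mark(header: Address) -> u8 {
        STATUS_SPEC.fetch_or::<u8>(header, 0b1, Ordering::SeqCst)
    }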
@@ -192,7 +192,7 @@ pub trait ObjectModel { failure_order: Ordering, ) -> std::result::Result { metadata_spec.compare_exchange::( - object, + object.to_header::(), old_val, new_val, mask, @@ -219,7 +219,7 @@ pub trait ObjectModel { val: T, order: Ordering, ) -> T { - metadata_spec.fetch_add::(object, val, order) + metadata_spec.fetch_add::(object.to_header::(), val, order) } /// A function to atomically perform a subtract operation on the specified per-object metadata's content. @@ -240,7 +240,7 @@ pub trait ObjectModel { val: T, order: Ordering, ) -> T { - metadata_spec.fetch_sub::(object, val, order) + metadata_spec.fetch_sub::(object.to_header::(), val, order) } /// A function to atomically perform a bit-and operation on the specified per-object metadata's content. @@ -260,7 +260,7 @@ pub trait ObjectModel { val: T, order: Ordering, ) -> T { - metadata_spec.fetch_and::(object, val, order) + metadata_spec.fetch_and::(object.to_header::(), val, order) } /// A function to atomically perform a bit-or operation on the specified per-object metadata's content. @@ -280,7 +280,7 @@ pub trait ObjectModel { val: T, order: Ordering, ) -> T { - metadata_spec.fetch_or::(object, val, order) + metadata_spec.fetch_or::(object.to_header::(), val, order) } /// A function to atomically perform an update operation on the specified per-object metadata's content. @@ -303,7 +303,7 @@ pub trait ObjectModel { fetch_order: Ordering, f: F, ) -> std::result::Result { - metadata_spec.fetch_update(object, set_order, fetch_order, f) + metadata_spec.fetch_update::(object.to_header::(), set_order, fetch_order, f) } /// Copy an object and return the address of the new object. Usually in the implementation of this method, @@ -379,29 +379,49 @@ pub trait ObjectModel { /// mature space for generational plans. const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.5; - /// For our allocation result `[cell, cell + bytes)`, if a binding's - /// definition of `ObjectReference` may point outside the cell (i.e. `object_ref >= cell + bytes`), - /// the binding needs to provide a `Some` value for this constant and - /// the value is the maximum of `object_ref - cell`. If a binding's - /// `ObjectReference` always points to an address in the cell (i.e. `[cell, cell + bytes)`), - /// they can leave this as `None`. - /// MMTk allocators use this value to make sure that the metadata for object reference is properly set. - const OBJECT_REF_OFFSET_BEYOND_CELL: Option = None; + /// Return the lowest address of the storage associated with an object. This should be + /// the address that a binding gets by an allocation call ([`crate::memory_manager::alloc`]). + /// + /// Arguments: + /// * `object`: The object to be queried. It should not be null. + fn ref_to_object_start(object: ObjectReference) -> Address; - /// Return the lowest address of the storage associated with an object. + /// Return the header base address from an object reference. Any object header metadata + /// in the [`crate::vm::ObjectModel`] declares a piece of header metadata with an offset + /// from this address. If a binding does not use any header metadata for MMTk, this method + /// will not be called, and the binding can simply use `unreachable!()` for the method. /// /// Arguments: - /// * `object`: The object to be queried. - fn object_start_ref(object: ObjectReference) -> Address; + /// * `object`: The object to be queried. It should not be null. 
+ fn ref_to_header(object: ObjectReference) -> Address; /// Return an address guaranteed to be inside the storage associated - /// with an object. + /// with an object. The returned address needs to be deterministic + /// for a given object, and should be a constant offset from the + /// object reference address. + /// + /// If a binding enables the `is_mmtk_object` feature, MMTk may forge the queried address + /// directly into a potential object reference, and call this method on the 'object reference'. + /// In that case, the argument `object` may not be a valid object reference, + /// and the implementation of this method should not use any object metadata. + /// However, if a binding does not use the `is_mmtk_object` feature, it can expect + /// the `object` to be valid. + /// + /// MMTk uses this method more frequently than [`crate::vm::ObjectModel::ref_to_object_start`]. /// /// Arguments: - /// * `object`: The object to be queried. - // FIXME: this doesn't seem essential. E.g. `get_object_end_address` or `object_start_ref` can cover its functionality. + /// * `object`: The object to be queried. It should not be null. fn ref_to_address(object: ObjectReference) -> Address; + /// Return an object for a given address returned by `ref_to_address()`. + /// This does exactly the opposite of `ref_to_address()`. The argument `addr` has + /// to be an address that was previously returned from `ref_to_address()`. Invoking this method + /// with an unexpected address is undefined behavior. + /// + /// Arguments: + /// * `addr`: An address that is returned from `ref_to_address()` + fn address_to_ref(addr: Address) -> ObjectReference; + /// Dump debugging information for an object. /// /// Arguments: diff --git a/src/vm/reference_glue.rs index b36d720e90..5245d8cc48 100644 --- a/src/vm/reference_glue.rs +++ b/src/vm/reference_glue.rs @@ -1,4 +1,3 @@ -use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; use crate::vm::VMBinding; @@ -25,9 +24,7 @@ pub trait ReferenceGlue { /// Arguments: /// * `new_reference`: The reference whose referent is to be cleared. fn clear_referent(new_reference: ObjectReference) { - Self::set_referent(new_reference, unsafe { - Address::zero().to_object_reference() - }); + Self::set_referent(new_reference, ObjectReference::NULL); } /// Get the referent from a weak reference object.
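Note: `ref_to_address` and `address_to_ref` are expected to be exact inverses for a valid object reference, and `ref_to_object_start` can never be above the address returned by `ref_to_address`. A minimal sketch of the invariant a binding could assert in debug builds (the `check_ref_address_roundtrip` helper is illustrative, not part of this change; `VM` stands for the binding's `VMBinding` type):

    use mmtk::util::{Address, ObjectReference};
    use mmtk::vm::{ObjectModel, VMBinding};

    // Converting an object reference to the address MMTk uses for it and back
    // must yield the same reference; the object start is never above that address.
    fn check_ref_address_roundtrip<VM: VMBinding>(object: ObjectReference) {
        let addr: Address = VM::VMObjectModel::ref_to_address(object);
        debug_assert_eq!(VM::VMObjectModel::address_to_ref(addr), object);
        debug_assert!(VM::VMObjectModel::ref_to_object_start(object) <= addr);
    }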
diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs index d68508c7bf..8b0ec106ff 100644 --- a/vmbindings/dummyvm/src/api.rs +++ b/vmbindings/dummyvm/src/api.rs @@ -115,7 +115,7 @@ pub extern "C" fn mmtk_is_mmtk_object(addr: Address) -> bool { #[no_mangle] pub extern "C" fn mmtk_is_in_mmtk_spaces(object: ObjectReference) -> bool { - memory_manager::is_in_mmtk_spaces(object) + memory_manager::is_in_mmtk_spaces::(object) } #[no_mangle] diff --git a/vmbindings/dummyvm/src/edges.rs b/vmbindings/dummyvm/src/edges.rs index 95f1f21823..f38ffc192f 100644 --- a/vmbindings/dummyvm/src/edges.rs +++ b/vmbindings/dummyvm/src/edges.rs @@ -132,11 +132,11 @@ pub mod only_64_bit { fn load(&self) -> ObjectReference { let compressed = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let expanded = (compressed as usize) << 3; - unsafe { Address::from_usize(expanded).to_object_reference() } + ObjectReference::from_raw_address(unsafe { Address::from_usize(expanded) }) } fn store(&self, object: ObjectReference) { - let expanded = object.to_address().as_usize(); + let expanded = object.to_raw_address().as_usize(); let compressed = (expanded >> 3) as u32; unsafe { (*self.slot_addr).store(compressed, atomic::Ordering::Relaxed) } } @@ -182,11 +182,11 @@ impl Edge for OffsetEdge { fn load(&self) -> ObjectReference { let middle = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let begin = middle - self.offset; - unsafe { begin.to_object_reference() } + ObjectReference::from_raw_address(begin) } fn store(&self, object: ObjectReference) { - let begin = object.to_address(); + let begin = object.to_raw_address(); let middle = begin + self.offset; unsafe { (*self.slot_addr).store(middle, atomic::Ordering::Relaxed) } } @@ -217,12 +217,12 @@ impl Edge for TaggedEdge { fn load(&self) -> ObjectReference { let tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let untagged = tagged & !Self::TAG_BITS_MASK; - unsafe { Address::from_usize(untagged).to_object_reference() } + ObjectReference::from_raw_address(unsafe { Address::from_usize(untagged) }) } fn store(&self, object: ObjectReference) { let old_tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; - let new_untagged = object.to_address().as_usize(); + let new_untagged = object.to_raw_address().as_usize(); let new_tagged = new_untagged | (old_tagged & Self::TAG_BITS_MASK); unsafe { (*self.slot_addr).store(new_tagged, atomic::Ordering::Relaxed) } } diff --git a/vmbindings/dummyvm/src/object_model.rs b/vmbindings/dummyvm/src/object_model.rs index 05871ce170..aff3545601 100644 --- a/vmbindings/dummyvm/src/object_model.rs +++ b/vmbindings/dummyvm/src/object_model.rs @@ -52,12 +52,21 @@ impl ObjectModel for VMObjectModel { unimplemented!() } - fn object_start_ref(object: ObjectReference) -> Address { - object.to_address().sub(OBJECT_REF_OFFSET) + fn ref_to_object_start(object: ObjectReference) -> Address { + object.to_raw_address().sub(OBJECT_REF_OFFSET) } - fn ref_to_address(_object: ObjectReference) -> Address { - unimplemented!() + fn ref_to_header(object: ObjectReference) -> Address { + object.to_raw_address() + } + + fn ref_to_address(object: ObjectReference) -> Address { + // Just use object start. 
+ Self::ref_to_object_start(object) + } + + fn address_to_ref(addr: Address) -> ObjectReference { + ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)) } fn dump_object(_object: ObjectReference) { diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index d66de2d6fa..57288b00fa 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -54,7 +54,7 @@ pub fn null() { SINGLE_OBJECT.with_fixture(|fixture| { let addr = Address::ZERO; assert_filter_fail(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); }); } @@ -66,7 +66,7 @@ pub fn too_small() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SMALL_OFFSET { let addr = Address::ZERO + offset; - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); } }); } @@ -75,7 +75,7 @@ pub fn too_small() { pub fn max() { SINGLE_OBJECT.with_fixture(|fixture| { let addr = Address::MAX; - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); }); } @@ -84,7 +84,7 @@ pub fn too_big() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SMALL_OFFSET { let addr = Address::MAX - offset; - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); } }); } @@ -92,7 +92,7 @@ pub fn too_big() { #[test] pub fn direct_hit() { SINGLE_OBJECT.with_fixture(|fixture| { - let addr = fixture.objref.to_address(); + let addr = fixture.objref.to_raw_address(); assert_filter_pass(addr); assert_valid_objref(addr); }); @@ -104,9 +104,9 @@ const SEVERAL_PAGES: usize = 4 * mmtk::util::constants::BYTES_IN_PAGE; pub fn small_offsets() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SEVERAL_PAGES { - let addr = fixture.objref.to_address() + offset; + let addr = fixture.objref.to_raw_address() + offset; if basic_filter(addr) { - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); } } }); @@ -117,9 +117,9 @@ pub fn medium_offsets_aligned() { SINGLE_OBJECT.with_fixture(|fixture| { let alignment = std::mem::align_of::
(); for offset in (alignment..(alignment * SEVERAL_PAGES)).step_by(alignment) { - let addr = fixture.objref.to_address() + offset; + let addr = fixture.objref.to_raw_address() + offset; assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); } }); } @@ -129,12 +129,12 @@ pub fn large_offsets_aligned() { SINGLE_OBJECT.with_fixture(|fixture| { for log_offset in 12usize..(usize::BITS as usize) { let offset = 1usize << log_offset; - let addr = match fixture.objref.to_address().as_usize().checked_add(offset) { + let addr = match fixture.objref.to_raw_address().as_usize().checked_add(offset) { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); } }); } @@ -144,13 +144,13 @@ pub fn negative_offsets() { SINGLE_OBJECT.with_fixture(|fixture| { for log_offset in LOG_BITS_IN_WORD..(usize::BITS as usize) { let offset = 1usize << log_offset; - let addr = match fixture.objref.to_address().as_usize().checked_sub(offset) { + let addr = match fixture.objref.to_raw_address().as_usize().checked_sub(offset) { Some(0) => break, Some(n) => unsafe { Address::from_usize(n) }, None => break, }; assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); } }); } diff --git a/vmbindings/dummyvm/src/tests/edges_test.rs b/vmbindings/dummyvm/src/tests/edges_test.rs index 00da47292f..8b15c28503 100644 --- a/vmbindings/dummyvm/src/tests/edges_test.rs +++ b/vmbindings/dummyvm/src/tests/edges_test.rs @@ -57,7 +57,7 @@ mod only_64_bit { // Note: We cannot guarantee GC will allocate an object in the low address region. // So we make up addresses just for testing the bit operations of compressed OOP edges. let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; - let objref1 = unsafe { Address::from_usize(COMPRESSABLE_ADDR1).to_object_reference() }; + let objref1 = ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR1) }); let mut slot: Atomic = Atomic::new(compressed1); @@ -73,7 +73,7 @@ mod only_64_bit { // So we make up addresses just for testing the bit operations of compressed OOP edges. let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; let compressed2 = (COMPRESSABLE_ADDR2 >> 3) as u32; - let objref2 = unsafe { Address::from_usize(COMPRESSABLE_ADDR2).to_object_reference() }; + let objref2 = ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR2) }); let mut slot: Atomic = Atomic::new(compressed1); @@ -90,7 +90,7 @@ mod only_64_bit { pub fn load_offset() { const OFFSET: usize = 48; FIXTURE.with_fixture(|fixture| { - let addr1 = fixture.objref1.to_address(); + let addr1 = fixture.objref1.to_raw_address(); let mut slot: Atomic
= Atomic::new(addr1 + OFFSET); let edge = OffsetEdge::new_with_offset(Address::from_ref(&mut slot), OFFSET); @@ -104,8 +104,8 @@ pub fn load_offset() { pub fn store_offset() { const OFFSET: usize = 48; FIXTURE.with_fixture(|fixture| { - let addr1 = fixture.objref1.to_address(); - let addr2 = fixture.objref2.to_address(); + let addr1 = fixture.objref1.to_raw_address(); + let addr2 = fixture.objref2.to_raw_address(); let mut slot: Atomic
= Atomic::new(addr1 + OFFSET); let edge = OffsetEdge::new_with_offset(Address::from_ref(&mut slot), OFFSET); @@ -123,8 +123,8 @@ const TAG2: usize = 0b10; #[test] pub fn load_tagged() { FIXTURE.with_fixture(|fixture| { - let mut slot1: Atomic = Atomic::new(fixture.objref1.to_address().as_usize() | TAG1); - let mut slot2: Atomic = Atomic::new(fixture.objref1.to_address().as_usize() | TAG2); + let mut slot1: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); + let mut slot2: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); let edge1 = TaggedEdge::new(Address::from_ref(&mut slot1)); let edge2 = TaggedEdge::new(Address::from_ref(&mut slot2)); @@ -140,8 +140,8 @@ pub fn load_tagged() { #[test] pub fn store_tagged() { FIXTURE.with_fixture(|fixture| { - let mut slot1: Atomic = Atomic::new(fixture.objref1.to_address().as_usize() | TAG1); - let mut slot2: Atomic = Atomic::new(fixture.objref1.to_address().as_usize() | TAG2); + let mut slot1: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); + let mut slot2: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); let edge1 = TaggedEdge::new(Address::from_ref(&mut slot1)); let edge2 = TaggedEdge::new(Address::from_ref(&mut slot2)); @@ -151,11 +151,11 @@ pub fn store_tagged() { // Tags should be preserved. assert_eq!( slot1.load(Ordering::SeqCst), - fixture.objref2.to_address().as_usize() | TAG1 + fixture.objref2.to_raw_address().as_usize() | TAG1 ); assert_eq!( slot2.load(Ordering::SeqCst), - fixture.objref2.to_address().as_usize() | TAG2 + fixture.objref2.to_raw_address().as_usize() | TAG2 ); let objref1 = edge1.load(); @@ -172,8 +172,8 @@ pub fn mixed() { const OFFSET: usize = 48; FIXTURE.with_fixture(|fixture| { - let addr1 = fixture.objref1.to_address(); - let addr2 = fixture.objref2.to_address(); + let addr1 = fixture.objref1.to_raw_address(); + let addr2 = fixture.objref2.to_raw_address(); let mut slot1: Atomic = Atomic::new(fixture.objref1); let mut slot3: Atomic
= Atomic::new(addr1 + OFFSET); diff --git a/vmbindings/dummyvm/src/tests/fixtures/mod.rs b/vmbindings/dummyvm/src/tests/fixtures/mod.rs index 4d70d3b242..9f95eef12c 100644 --- a/vmbindings/dummyvm/src/tests/fixtures/mod.rs +++ b/vmbindings/dummyvm/src/tests/fixtures/mod.rs @@ -86,7 +86,7 @@ impl FixtureContent for SingleObject { let addr = mmtk_alloc(handle, size, 8, 0, semantics); assert!(!addr.is_zero()); - let objref = unsafe { addr.add(OBJECT_REF_OFFSET).to_object_reference() }; + let objref = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); mmtk_post_alloc(handle, objref, size, semantics); SingleObject { objref } @@ -131,10 +131,10 @@ impl FixtureContent for TwoObjects { let addr = mmtk_alloc(handle, size, 8, 0, semantics); assert!(!addr.is_zero()); - let objref1 = unsafe { addr.add(OBJECT_REF_OFFSET).to_object_reference() }; + let objref1 = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); mmtk_post_alloc(handle, objref1, size, semantics); - let objref2 = unsafe { addr.add(OBJECT_REF_OFFSET).to_object_reference() }; + let objref2 = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); mmtk_post_alloc(handle, objref2, size, semantics); TwoObjects { objref1, objref2 } diff --git a/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs b/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs index d5605b8e2d..2e69e120d6 100644 --- a/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs +++ b/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs @@ -1,7 +1,7 @@ // GITHUB-CI: MMTK_PLAN=all use crate::tests::fixtures::{Fixture, SingleObject}; -use mmtk::memory_manager::is_in_mmtk_spaces; +use crate::api::mmtk_is_in_mmtk_spaces as is_in_mmtk_spaces; use mmtk::util::*; lazy_static! { @@ -12,7 +12,7 @@ lazy_static! { pub fn null() { SINGLE_OBJECT.with_fixture(|_fixture| { assert!( - !is_in_mmtk_spaces(unsafe { Address::ZERO.to_object_reference() }), + !is_in_mmtk_spaces(ObjectReference::NULL), "NULL pointer should not be in any MMTk spaces." ); }); @@ -22,7 +22,7 @@ pub fn null() { pub fn max() { SINGLE_OBJECT.with_fixture(|_fixture| { assert!( - !is_in_mmtk_spaces(unsafe { Address::MAX.to_object_reference() }), + !is_in_mmtk_spaces(ObjectReference::from_raw_address(Address::MAX)), "Address::MAX should not be in any MMTk spaces." ); }); @@ -43,13 +43,13 @@ pub fn large_offsets_aligned() { SINGLE_OBJECT.with_fixture(|fixture| { for log_offset in 12usize..(usize::BITS as usize) { let offset = 1usize << log_offset; - let addr = match fixture.objref.to_address().as_usize().checked_add(offset) { + let addr = match fixture.objref.to_raw_address().as_usize().checked_add(offset) { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; // It's just a smoke test. It is hard to predict if the addr is still in any space, // but it must not crash. - let _ = is_in_mmtk_spaces(unsafe { addr.to_object_reference() }); + let _ = is_in_mmtk_spaces(ObjectReference::from_raw_address(addr)); } }); } @@ -59,13 +59,13 @@ pub fn negative_offsets() { SINGLE_OBJECT.with_fixture(|fixture| { for log_offset in 1usize..(usize::BITS as usize) { let offset = 1usize << log_offset; - let addr = match fixture.objref.to_address().as_usize().checked_sub(offset) { + let addr = match fixture.objref.to_raw_address().as_usize().checked_sub(offset) { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; // It's just a smoke test. It is hard to predict if the addr is still in any space, // but it must not crash. 
- let _ = is_in_mmtk_spaces(unsafe { addr.to_object_reference() }); + let _ = is_in_mmtk_spaces(ObjectReference::from_raw_address(addr)); } }); }
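Note: the raw conversions used throughout the updated tests are plain value casts with no offset arithmetic, unlike `ObjectModel::ref_to_address`. A standalone sketch of that assumption (the address constant is made up for illustration):

    use mmtk::util::{Address, ObjectReference};

    fn main() {
        // NULL round-trips to the zero address.
        assert!(ObjectReference::NULL.to_raw_address().is_zero());

        // from_raw_address/to_raw_address simply reinterpret the value; any
        // object-model-specific offset (e.g. the dummy VM's OBJECT_REF_OFFSET)
        // has to be applied by the binding itself.
        let addr = unsafe { Address::from_usize(0x1000_0000) };
        let objref = ObjectReference::from_raw_address(addr);
        assert_eq!(objref.to_raw_address(), addr);
    }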