diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 9a5733bc0661..185cb1b53780 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -32,7 +32,9 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       was_activated_(false),
       black_allocation_(false),
       finalize_marking_completed_(false),
-      request_type_(NONE) {}
+      request_type_(NONE),
+      new_generation_observer_(*this, kAllocatedThreshold),
+      old_generation_observer_(*this, kAllocatedThreshold) {}
 
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -487,6 +489,16 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
     state_ = SWEEPING;
   }
 
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->AddAllocationObserver(&new_generation_observer_);
+    } else {
+      space->AddAllocationObserver(&old_generation_observer_);
+    }
+  }
+
   incremental_marking_job()->Start(heap_);
 }
 
@@ -945,6 +957,16 @@ void IncrementalMarking::Stop() {
         Max(0, old_generation_size_mb - old_generation_limit_mb));
   }
 
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->RemoveAllocationObserver(&new_generation_observer_);
+    } else {
+      space->RemoveAllocationObserver(&old_generation_observer_);
+    }
+  }
+
   IncrementalMarking::set_should_hurry(false);
   if (IsMarking()) {
     PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -1063,33 +1085,30 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
     return;
   }
 
-  size_t bytes_to_process = StepSizeToKeepUpWithAllocations();
-
-  if (bytes_to_process < IncrementalMarking::kAllocatedThreshold) {
-    return;
-  }
-
-  bytes_to_process += StepSizeToMakeProgress();
-
-  // The first step after Scavenge will see many allocated bytes.
-  // Cap the step size to distribute the marking work more uniformly.
-  size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
-      kMaxStepSizeInMs,
-      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
-  bytes_to_process = Min(bytes_to_process, max_step_size);
-
-  size_t bytes_processed = 0;
-  if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
-    // Steps performed in tasks have put us ahead of schedule.
-    // We skip processing of marking dequeue here and thus
-    // shift marking time from inside V8 to standalone tasks.
-    bytes_marked_ahead_of_schedule_ -= bytes_to_process;
-    bytes_processed = bytes_to_process;
-  } else {
-    bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
-                           FORCE_COMPLETION, StepOrigin::kV8);
+  size_t bytes_to_process =
+      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+
+  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+    // The first step after Scavenge will see many allocated bytes.
+    // Cap the step size to distribute the marking work more uniformly.
+    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+        kMaxStepSizeInMs,
+        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+    bytes_to_process = Min(bytes_to_process, max_step_size);
+
+    size_t bytes_processed = 0;
+    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+      // Steps performed in tasks have put us ahead of schedule.
+      // We skip processing of marking dequeue here and thus
+      // shift marking time from inside V8 to standalone tasks.
+      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+      bytes_processed = bytes_to_process;
+    } else {
+      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+                             FORCE_COMPLETION, StepOrigin::kV8);
+    }
+    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
   }
-  bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
 }
 
 size_t IncrementalMarking::Step(size_t bytes_to_process,
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 7f255244b508..c2290c4d8206 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -99,7 +99,6 @@ class IncrementalMarking {
               CompletionAction completion_action,
               ForceCompletionAction force_completion,
               StepOrigin step_origin);
-  void AdvanceIncrementalMarkingOnAllocation();
 
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it. We start
@@ -219,6 +218,20 @@ class IncrementalMarking {
   void AbortBlackAllocation();
 
  private:
+  class Observer : public AllocationObserver {
+   public:
+    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+        : AllocationObserver(step_size),
+          incremental_marking_(incremental_marking) {}
+
+    void Step(int bytes_allocated, Address, size_t) override {
+      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+    }
+
+   private:
+    IncrementalMarking& incremental_marking_;
+  };
+
   int64_t SpaceLeftInOldSpace();
 
   void StartMarking();
@@ -256,6 +269,8 @@ class IncrementalMarking {
 
   void IncrementIdleMarkingDelayCounter();
 
+  void AdvanceIncrementalMarkingOnAllocation();
+
   size_t StepSizeToKeepUpWithAllocations();
   size_t StepSizeToMakeProgress();
 
@@ -282,6 +297,8 @@ class IncrementalMarking {
   GCRequestType request_type_;
 
   IncrementalMarkingJob incremental_marking_job_;
+  Observer new_generation_observer_;
+  Observer old_generation_observer_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 9086b84e00dc..7bcc65d5f9c8 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2582,15 +2582,6 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
   owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       Heap::kNoGCFlags, kNoGCCallbackFlags);
 
-  // We cannot place incremental marking step in an AllocationObserver because
-  // 1) incremental marking step can change linear allocation area.
-  // 2) allocation observers are called after allocation.
-  // 3) allocation folding assumes that the newly allocated object immediately
-  //    precedes the linear allocation area.
-  // See crbug.com/659165.
-  owner_->heap()
-      ->incremental_marking()
-      ->AdvanceIncrementalMarkingOnAllocation();
 
   size_t new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -3020,8 +3011,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
                                                             kNoGCCallbackFlags);
 
-  heap()->incremental_marking()->AdvanceIncrementalMarkingOnAllocation();
-
   AllocationStep(object->address(), object_size);
 
   if (heap()->incremental_marking()->black_allocation()) {
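For readers unfamiliar with the pattern this patch adopts, the sketch below models how a byte-threshold allocation observer can drive incremental marking steps, mirroring the new `IncrementalMarking::Observer` and the `AddAllocationObserver`/`RemoveAllocationObserver` calls in `Start()` and `Stop()`. It is a standalone illustration under assumed names, not V8's actual API: the `Space`, `AllocationObserver`, and `MarkingObserver` classes, the `Allocate`/`AllocationEvent` hooks, and the 64 KB threshold are all invented for the example.

```cpp
// Minimal sketch (assumed names, not V8's real classes) of an
// allocation observer that triggers a marking step once enough
// bytes have been allocated since the last step.
#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

class AllocationObserver {
 public:
  explicit AllocationObserver(size_t step_size) : step_size_(step_size) {}
  virtual ~AllocationObserver() = default;

  // Called by the owning space after each allocation; fires Step()
  // once the bytes accumulated since the last step cross the threshold.
  void AllocationEvent(size_t bytes) {
    bytes_since_step_ += bytes;
    if (bytes_since_step_ >= step_size_) {
      Step(bytes_since_step_);
      bytes_since_step_ = 0;
    }
  }

 protected:
  virtual void Step(size_t bytes_allocated) = 0;

 private:
  size_t step_size_;
  size_t bytes_since_step_ = 0;
};

class Space {
 public:
  void AddAllocationObserver(AllocationObserver* observer) {
    observers_.push_back(observer);
  }
  void RemoveAllocationObserver(AllocationObserver* observer) {
    observers_.erase(
        std::remove(observers_.begin(), observers_.end(), observer),
        observers_.end());
  }
  // Observers are notified after the allocation has happened, which is
  // the ordering the deleted comment in FreeList::Allocate refers to.
  void Allocate(size_t bytes) {
    for (AllocationObserver* observer : observers_)
      observer->AllocationEvent(bytes);
  }

 private:
  std::vector<AllocationObserver*> observers_;
};

// Analogue of IncrementalMarking::Observer: forwards each threshold
// crossing to a marking-step callback.
class MarkingObserver : public AllocationObserver {
 public:
  MarkingObserver(size_t step_size, std::function<void(size_t)> step)
      : AllocationObserver(step_size), step_(std::move(step)) {}

 protected:
  void Step(size_t bytes_allocated) override { step_(bytes_allocated); }

 private:
  std::function<void(size_t)> step_;
};

int main() {
  Space old_space;
  MarkingObserver observer(
      64 * 1024,  // hypothetical stand-in for kAllocatedThreshold
      [](size_t bytes) {
        std::cout << "marking step after " << bytes << " bytes\n";
      });
  old_space.AddAllocationObserver(&observer);     // as in Start()
  for (int i = 0; i < 10; i++) old_space.Allocate(20 * 1024);
  old_space.RemoveAllocationObserver(&observer);  // as in Stop()
}
```

The design choice the patch makes is visible in this shape: observers are registered per space only while incremental marking is active, so the polling previously hard-wired into `FreeList::Allocate` and `LargeObjectSpace::AllocateRaw` disappears from the allocation fast path when no marking is in progress.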