Revert of [heap] Invoke incremental marking step before allocation. (patchset #1 id:1 of https://codereview.chromium.org/2464393002/ )

Reason for revert:
Performance regression on Octane and V8 runtime stats.

Original issue's description:
> [heap] Invoke incremental marking step before allocation.
>
> This ensures that the newly allocated object immediately precedes the
> linear allocation area, which is needed for allocation folding.
>
> For more info see:
> https://bugs.chromium.org/p/chromium/issues/detail?id=659165#c13
>
> BUG=chromium:659165

TBR=hpayer@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:659165

Review-Url: https://codereview.chromium.org/2472043002
Cr-Commit-Position: refs/heads/master@{#40713}
ulan authored and Commit bot committed Nov 3, 2016
1 parent 4447405 commit 8f85aba
Showing 3 changed files with 64 additions and 39 deletions.
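
The original description leans on a single invariant: allocation folding assumes that the most recently allocated object immediately precedes the linear allocation area. The following is a minimal sketch of that invariant using a toy bump-pointer allocator; `LinearArea`, `Allocate`, and `CanFoldInto` are invented names, not V8's heap API.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy model of a linear (bump-pointer) allocation area.
struct LinearArea {
  uintptr_t top = 0x1000;    // next free address (toy values)
  uintptr_t limit = 0x2000;  // end of the area
};

// Bump allocation: the new object always ends exactly at the new top.
uintptr_t Allocate(LinearArea* area, size_t size) {
  uintptr_t result = area->top;
  area->top += size;
  return result;
}

// Allocation folding is only sound while the most recently allocated
// object immediately precedes the linear allocation area.
bool CanFoldInto(const LinearArea& area, uintptr_t last, size_t size) {
  return last + size == area.top;
}

int main() {
  LinearArea area;
  uintptr_t obj = Allocate(&area, 64);
  assert(CanFoldInto(area, obj, 64));  // holds right after allocation

  // A marking step may replace the linear allocation area. Running the step
  // after the allocation can therefore break the invariant; running it
  // before the allocation, as the reverted CL did, cannot.
  area.top = 0x3000;  // stand-in for "marking step moved the area"
  assert(!CanFoldInto(area, obj, 64));
}

The revert below moves the step back into AllocationObservers, which fire after allocation; the diffs restore the observer plumbing and delete the step-before-allocation calls.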
73 changes: 46 additions & 27 deletions src/heap/incremental-marking.cc
@@ -32,7 +32,9 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       was_activated_(false),
       black_allocation_(false),
       finalize_marking_completed_(false),
-      request_type_(NONE) {}
+      request_type_(NONE),
+      new_generation_observer_(*this, kAllocatedThreshold),
+      old_generation_observer_(*this, kAllocatedThreshold) {}
 
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -487,6 +489,16 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
     state_ = SWEEPING;
   }
 
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->AddAllocationObserver(&new_generation_observer_);
+    } else {
+      space->AddAllocationObserver(&old_generation_observer_);
+    }
+  }
+
   incremental_marking_job()->Start(heap_);
 }

@@ -945,6 +957,16 @@ void IncrementalMarking::Stop() {
            Max(0, old_generation_size_mb - old_generation_limit_mb));
   }
 
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->RemoveAllocationObserver(&new_generation_observer_);
+    } else {
+      space->RemoveAllocationObserver(&old_generation_observer_);
+    }
+  }
+
   IncrementalMarking::set_should_hurry(false);
   if (IsMarking()) {
     PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -1063,33 +1085,30 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
     return;
   }
 
-  size_t bytes_to_process = StepSizeToKeepUpWithAllocations();
-
-  if (bytes_to_process < IncrementalMarking::kAllocatedThreshold) {
-    return;
-  }
-
-  bytes_to_process += StepSizeToMakeProgress();
-
-  // The first step after Scavenge will see many allocated bytes.
-  // Cap the step size to distribute the marking work more uniformly.
-  size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
-      kMaxStepSizeInMs,
-      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
-  bytes_to_process = Min(bytes_to_process, max_step_size);
-
-  size_t bytes_processed = 0;
-  if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
-    // Steps performed in tasks have put us ahead of schedule.
-    // We skip processing of marking dequeue here and thus
-    // shift marking time from inside V8 to standalone tasks.
-    bytes_marked_ahead_of_schedule_ -= bytes_to_process;
-    bytes_processed = bytes_to_process;
-  } else {
-    bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
-                           FORCE_COMPLETION, StepOrigin::kV8);
+  size_t bytes_to_process =
+      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+
+  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+    // The first step after Scavenge will see many allocated bytes.
+    // Cap the step size to distribute the marking work more uniformly.
+    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+        kMaxStepSizeInMs,
+        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+    bytes_to_process = Min(bytes_to_process, max_step_size);
+
+    size_t bytes_processed = 0;
+    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+      // Steps performed in tasks have put us ahead of schedule.
+      // We skip processing of marking dequeue here and thus
+      // shift marking time from inside V8 to standalone tasks.
+      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+      bytes_processed = bytes_to_process;
+    } else {
+      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+                             FORCE_COMPLETION, StepOrigin::kV8);
+    }
+    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
   }
-  bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
 }
 
 size_t IncrementalMarking::Step(size_t bytes_to_process,
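Both the removed and the restored bodies of `AdvanceIncrementalMarkingOnAllocation` implement the same pacing policy; the revert only restores the original single nested check. Below is a compilable sketch of that policy under simplifying assumptions: a fixed `kAllocatedThreshold` and a stubbed `Step`, both of which V8 actually derives from live heap and tracer state.

#include <algorithm>
#include <cstddef>

constexpr size_t kAllocatedThreshold = 64 * 1024;  // toy value

// Stub for IncrementalMarking::Step: pretend every requested byte is marked.
size_t Step(size_t bytes_to_process) { return bytes_to_process; }

struct Pacer {
  size_t bytes_allocated = 0;
  size_t bytes_marked_ahead_of_schedule = 0;

  // keep_up ~ StepSizeToKeepUpWithAllocations(), progress ~
  // StepSizeToMakeProgress(), max_step_size ~ the marking-speed-based cap.
  void AdvanceOnAllocation(size_t keep_up, size_t progress,
                           size_t max_step_size) {
    size_t bytes_to_process = keep_up + progress;
    if (bytes_to_process >= kAllocatedThreshold) {
      // The first step after a scavenge sees many allocated bytes; the cap
      // spreads that work over several steps instead of one big pause.
      bytes_to_process = std::min(bytes_to_process, max_step_size);

      size_t bytes_processed = 0;
      if (bytes_marked_ahead_of_schedule >= bytes_to_process) {
        // Background tasks already marked enough; only do the accounting.
        bytes_marked_ahead_of_schedule -= bytes_to_process;
        bytes_processed = bytes_to_process;
      } else {
        bytes_processed = Step(bytes_to_process);
      }
      bytes_allocated -= std::min(bytes_allocated, bytes_processed);
    }
  }
};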
19 changes: 18 additions & 1 deletion src/heap/incremental-marking.h
@@ -99,7 +99,6 @@ class IncrementalMarking {
                                  CompletionAction completion_action,
                                  ForceCompletionAction force_completion,
                                  StepOrigin step_origin);
-  void AdvanceIncrementalMarkingOnAllocation();
 
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it. We start
@@ -219,6 +218,20 @@
   void AbortBlackAllocation();
 
  private:
+  class Observer : public AllocationObserver {
+   public:
+    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+        : AllocationObserver(step_size),
+          incremental_marking_(incremental_marking) {}
+
+    void Step(int bytes_allocated, Address, size_t) override {
+      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+    }
+
+   private:
+    IncrementalMarking& incremental_marking_;
+  };
+
   int64_t SpaceLeftInOldSpace();
 
   void StartMarking();
@@ -256,6 +269,8 @@

   void IncrementIdleMarkingDelayCounter();
 
+  void AdvanceIncrementalMarkingOnAllocation();
+
   size_t StepSizeToKeepUpWithAllocations();
   size_t StepSizeToMakeProgress();

@@ -282,6 +297,8 @@
   GCRequestType request_type_;
 
   IncrementalMarkingJob incremental_marking_job_;
+  Observer new_generation_observer_;
+  Observer old_generation_observer_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
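The nested `Observer` added above is a thin adapter: each space calls it back roughly once per `step_size` allocated bytes, and it forwards into the marker. A self-contained sketch of that wiring with simplified signatures follows (V8's `AllocationObserver::Step` also receives the address and size of the soon-to-be-allocated object, which this sketch drops).

#include <cstdint>
#include <vector>

// Simplified AllocationObserver: notified about every ~step_size allocated
// bytes in each space it is registered with.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {}
  virtual ~AllocationObserver() = default;
  virtual void Step(int bytes_allocated) = 0;
  intptr_t step_size() const { return step_size_; }

 private:
  intptr_t step_size_;
};

// Simplified Space: fans allocation notifications out to its observers,
// after the allocation has already happened.
class Space {
 public:
  void AddAllocationObserver(AllocationObserver* observer) {
    observers_.push_back(observer);
  }
  void NotifyAllocation(int bytes) {
    for (AllocationObserver* observer : observers_) observer->Step(bytes);
  }

 private:
  std::vector<AllocationObserver*> observers_;
};

// The marker-side observer just forwards, like the Observer class above.
class Marker {
 public:
  void AdvanceOnAllocation() { /* pacing logic from the previous sketch */ }
};

class MarkingObserver : public AllocationObserver {
 public:
  MarkingObserver(Marker& marker, intptr_t step_size)
      : AllocationObserver(step_size), marker_(marker) {}
  void Step(int) override { marker_.AdvanceOnAllocation(); }

 private:
  Marker& marker_;
};

Registering one observer per generation, as Start() does above, leaves room for the two generations to pace marking differently later, even though this commit constructs both with the same kAllocatedThreshold.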
11 changes: 0 additions & 11 deletions src/heap/spaces.cc
@@ -2582,15 +2582,6 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {

   owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       Heap::kNoGCFlags, kNoGCCallbackFlags);
-  // We cannot place incremental marking step in an AllocationObserver because
-  // 1) incremental marking step can change linear allocation area.
-  // 2) allocation observers are called after allocation.
-  // 3) allocation folding assumes that the newly allocated object immediately
-  //    precedes the linear allocation area.
-  // See crbug.com/659165.
-  owner_->heap()
-      ->incremental_marking()
-      ->AdvanceIncrementalMarkingOnAllocation();
 
   size_t new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -3020,8 +3011,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,

   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
                                                             kNoGCCallbackFlags);
-  heap()->incremental_marking()->AdvanceIncrementalMarkingOnAllocation();
-
   AllocationStep(object->address(), object_size);
 
   if (heap()->incremental_marking()->black_allocation()) {
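The comment deleted above carries the core reasoning of the original CL: observers run after the allocation, a marking step can change the linear allocation area, and folding assumes the new object still touches that area. Reusing the toy `LinearArea`, `Allocate`, `Space`, and `Marker` from the sketches above (illustrative names only), the two orderings differ like this:

// Reverted CL: step first, then allocate. The object ends exactly at the
// top of whichever linear area is current, so folding stays valid.
uintptr_t AllocateWithStepBefore(LinearArea* area, Marker* marker,
                                 size_t size) {
  marker->AdvanceOnAllocation();  // may replace the linear allocation area
  return Allocate(area, size);
}

// Restored behavior: allocate first, notify observers afterwards. If an
// observer's marking step moves the area, the object no longer immediately
// precedes it, which is the crbug.com/659165 situation described above.
uintptr_t AllocateWithObserver(LinearArea* area, Space* space, size_t size) {
  uintptr_t obj = Allocate(area, size);
  space->NotifyAllocation(static_cast<int>(size));
  return obj;
}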
