From c33b25bf6357ab30af57bbb1693ed383a2756f44 Mon Sep 17 00:00:00 2001
From: bjaideep
Date: Tue, 9 May 2017 06:46:17 -0700
Subject: [PATCH] Revert of PPC/s390: Reland: [TypeFeedbackVector] Store optimized code in the vector (patchset #1 id:1 of https://codereview.chromium.org/2861863003/ )

Reason for revert:
Original CL reverted. Crashing on Canary

BUG=chromium:718891

Original issue's description:
> PPC/s390: Reland: [TypeFeedbackVector] Store optimized code in the vector
>
> Port 662aa425bac00f468d2b2e91a149b5f35e2a4ad3
>
> Original Commit Message:
>
> Since the feedback vector is itself a native context structure, why
> not store optimized code for a function in there rather than in
> a map from native context to code? This allows us to get rid of
> the optimized code map in the SharedFunctionInfo, saving a pointer,
> and making lookup of any optimized code quicker.
>
> Original patch by Michael Stanton
>
> R=rmcilroy@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
> BUG=v8:6246
> LOG=N
>
> Review-Url: https://codereview.chromium.org/2861863003
> Cr-Commit-Position: refs/heads/master@{#45111}
> Committed: https://chromium.googlesource.com/v8/v8/+/d587812258c232f7b9a1f1a9a017ba3f9cea12ea

TBR=joransiu@ca.ibm.com,jyan@ca.ibm.com,michael_dawson@ca.ibm.com,rmcilroy@chromium.org
# Not skipping CQ checks because original CL landed more than 1 days ago.
BUG=v8:6246

Review-Url: https://codereview.chromium.org/2870703003
Cr-Commit-Position: refs/heads/master@{#45195}
---
 src/builtins/ppc/builtins-ppc.cc   | 61 ++++++++++++++++++++++-----
 src/builtins/s390/builtins-s390.cc | 68 ++++++++++++++++++++++++------
 2 files changed, 104 insertions(+), 25 deletions(-)

diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index 06ffa676d1b8..5dfaa49620ca 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -1342,8 +1342,10 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
   Label try_shared;
+  Label loop_top, loop_bottom;
 
   Register closure = r4;
+  Register map = r9;
   Register index = r5;
 
   // Do we have a valid feedback vector?
@@ -1351,29 +1353,58 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
   __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
 
-  // Is optimized code available in the feedback vector?
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&try_shared);
+
+  // r10 : native context
+  // r5 : length / index
+  // r9 : optimized code map
+  // r6 : new target
+  // r4 : closure
+  Register native_context = r10;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r11;
+  Register array_pointer = r8;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ add(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ bne(&loop_bottom);
+
+  // Code available?
   Register entry = r7;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                             kPointerSize +
-                                         FeedbackVector::kHeaderSize));
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r8);
 
-  // Load native context into r9.
-  Register native_context = r9;
-  __ LoadP(native_context, NativeContextMemOperand());
-
   // Link the closure into the optimized function list.
+  // r7 : code entry
+  // r10: native context
+  // r4 : closure
   __ LoadP(
       r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
   __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
             r0);
-  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, r5,
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, temp,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   const int function_list_offset =
@@ -1383,11 +1414,19 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
       ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
   // Save closure before the write barrier.
   __ mr(r8, closure);
-  __ RecordWriteContextSlot(native_context, function_list_offset, r8, r5,
+  __ RecordWriteContextSlot(native_context, function_list_offset, r8, temp,
                             kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ JumpToJSEntry(entry);
 
-  // We found no optimized code.
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found no code.
+  __ b(&gotta_call_runtime);
+
   __ bind(&try_shared);
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index c488dc68421e..5311fcdfaa88 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -1345,8 +1345,10 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
   Label try_shared;
+  Label loop_top, loop_bottom;
 
   Register closure = r3;
+  Register map = r8;
   Register index = r4;
 
   // Do we have a valid feedback vector?
@@ -1354,29 +1356,59 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
   __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
 
-  // Is optimized code available in the feedback vector?
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&try_shared);
+
+  // Find literals.
+  // r9 : native context
+  // r4 : length / index
+  // r8 : optimized code map
+  // r5 : new target
+  // r3 : closure
+  Register native_context = r9;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r7;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ AddP(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ CmpP(temp, native_context);
+  __ bne(&loop_bottom, Label::kNear);
+
+  // Code available?
   Register entry = r6;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                             kPointerSize +
-                                         FeedbackVector::kHeaderSize));
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r7);
 
-  // Load native context into r8.
-  Register native_context = r8;
-  __ LoadP(native_context, NativeContextMemOperand());
-
   // Link the closure into the optimized function list.
+  // r6 : code entry
+  // r9: native context
+  // r3 : closure
   __ LoadP(
       r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
   __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
             r0);
-  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, r4,
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   const int function_list_offset =
@@ -1386,18 +1418,26 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
       ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
   // Save closure before the write barrier.
   __ LoadRR(r7, closure);
-  __ RecordWriteContextSlot(native_context, function_list_offset, r7, r4,
+  __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
                             kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ JumpToJSEntry(entry);
 
-  // We found no optimized code.
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found no code.
+  __ b(&gotta_call_runtime);
+
   __ bind(&try_shared);
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Is the shared function marked for tier up?
-  __ LoadlB(r7, FieldMemOperand(
-                    entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
-  __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+  __ LoadlB(temp, FieldMemOperand(
+                      entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
   __ bne(&gotta_call_runtime);
 
   // If SFI points to anything other than CompileLazy, install that.
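The hunks above restore, on both PPC and s390, the pre-TypeFeedbackVector fast
path of Generate_CompileLazy: walk the SharedFunctionInfo's optimized code map
from the highest index downward, compare each entry's context weak cell with
the current native context, and on a match install the cached code in the
closure before jumping to it. The following is a minimal standalone C++ model
of that lookup loop only; WeakCell, Entry and LookupOptimizedCode are
simplified stand-ins for illustration, not the real V8 types or APIs, and the
Smi index arithmetic (kEntryLength-sized steps) is abstracted into plain array
entries.

  // Standalone model of the restored lookup loop (illustrative only).
  #include <cstddef>
  #include <vector>

  struct WeakCell { void* value = nullptr; };          // cleared cell -> nullptr
  struct Entry { WeakCell context; WeakCell code; };   // one code-map entry

  // Scans the map from the end, as the assembly does with `index`, and
  // returns the cached code for `native_context`, or nullptr if none.
  void* LookupOptimizedCode(const std::vector<Entry>& code_map,
                            const void* native_context) {
    for (std::size_t i = code_map.size(); i > 0; --i) {    // loop_top
      const Entry& entry = code_map[i - 1];
      if (entry.context.value != native_context) continue; // loop_bottom
      if (entry.code.value == nullptr) break;  // cleared cell: try_shared
      return entry.code.value;  // found code: caller installs it and jumps
    }
    return nullptr;  // no hit: try_shared / gotta_call_runtime in the caller
  }

A cleared code cell (a Smi in the assembly, nullptr here) falls through to the
shared-code path rather than continuing the scan, matching the
JumpIfSmi(entry, &try_shared) branch in both ports.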