From 2d3d74705d859c60b0b10c2b84b6c3f575685627 Mon Sep 17 00:00:00 2001
From: Liu Yu
Date: Sat, 20 Feb 2021 19:11:53 +0800
Subject: [PATCH] [mips][sparkplug] Use return to jump to optimized code to
 keep the RSB balanced

Besides, extract common code into
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot and cache the instance in
a register.

Port: af3c5307f0a20e1621ea1016e4f936776dfa44c2
Port: 89ea44bf4165ef755fc6b91fddf10cf171f05642
Port: adf035fb4192d3f6516c1af3d83c97a54fab27c0

Change-Id: I3fde5b0995ea8aa51faeb3fd743cebef748ba745
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2710212
Auto-Submit: Liu yu
Reviewed-by: Zhao Jiazhong
Commit-Queue: Zhao Jiazhong
Cr-Commit-Position: refs/heads/master@{#72884}
---
 src/builtins/mips/builtins-mips.cc          | 44 ++++++++++---------
 src/builtins/mips64/builtins-mips64.cc      | 43 ++++++++++--------
 src/codegen/mips/macro-assembler-mips.h     |  3 +-
 src/codegen/mips64/macro-assembler-mips64.h |  3 +-
 .../baseline/mips/liftoff-assembler-mips.h  | 17 ++++---
 .../mips64/liftoff-assembler-mips64.h       | 19 +++++---
 6 files changed, 75 insertions(+), 54 deletions(-)

diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index bf2f56c3b573..670238a45a9f 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -957,6 +957,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
   __ bind(&end);
 }
 
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+    MacroAssembler* masm, Register optimization_state,
+    Register feedback_vector) {
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is available
+  __ andi(t1, optimization_state,
+          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+  __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
+
+  Register optimization_marker = optimization_state;
+  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+  __ bind(&maybe_has_optimized_code);
+  Register optimized_code_entry = optimization_state;
+  __ Lw(optimization_marker,
+        FieldMemOperand(feedback_vector,
+                        FeedbackVector::kMaybeOptimizedCodeOffset));
+
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.
@@ -1160,26 +1182,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ jmp(&after_stack_check_interrupt);
 
   __ bind(&has_optimized_code_or_marker);
-
-  Label maybe_has_optimized_code;
-  // Check if optimized code marker is available
-  __ andi(t1, optimization_state,
-          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
-  __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
-
-  Register optimization_marker = optimization_state;
-  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
-  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
-  // Fall through if there's no runnable optimized code.
-  __ jmp(&not_optimized);
-
-  __ bind(&maybe_has_optimized_code);
-  Register optimized_code_entry = optimization_state;
-  __ Lw(optimization_marker,
-        FieldMemOperand(feedback_vector,
-                        FeedbackVector::kMaybeOptimizedCodeOffset));
-
-  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
+  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+                                               feedback_vector);
 
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index 32aed4dc3c6a..c33f46d1c1ef 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -974,6 +974,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
   __ bind(&end);
 }
 
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+    MacroAssembler* masm, Register optimization_state,
+    Register feedback_vector) {
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is available
+  __ andi(t0, optimization_state,
+          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+  __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+  Register optimization_marker = optimization_state;
+  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+  __ bind(&maybe_has_optimized_code);
+  Register optimized_code_entry = optimization_state;
+  __ Ld(optimization_marker,
+        FieldMemOperand(feedback_vector,
+                        FeedbackVector::kMaybeOptimizedCodeOffset));
+
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.
@@ -1178,25 +1200,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ jmp(&after_stack_check_interrupt);
 
   __ bind(&has_optimized_code_or_marker);
-  Label maybe_has_optimized_code;
-  // Check if optimized code marker is available
-  __ andi(t0, optimization_state,
-          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
-  __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
-
-  Register optimization_marker = optimization_state;
-  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
-  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
-  // Fall through if there's no runnable optimized code.
-  __ jmp(&not_optimized);
-
-  __ bind(&maybe_has_optimized_code);
-  Register optimized_code_entry = optimization_state;
-  __ Ld(optimization_marker,
-        FieldMemOperand(feedback_vector,
-                        FeedbackVector::kMaybeOptimizedCodeOffset));
-
-  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
+  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+                                               feedback_vector);
 
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
diff --git a/src/codegen/mips/macro-assembler-mips.h b/src/codegen/mips/macro-assembler-mips.h
index ec0115cd4603..1fe4c451f955 100644
--- a/src/codegen/mips/macro-assembler-mips.h
+++ b/src/codegen/mips/macro-assembler-mips.h
@@ -227,7 +227,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
-  void JumpCodeObject(Register code_object) override {
+  void JumpCodeObject(Register code_object,
+                      JumpMode jump_mode = JumpMode::kJump) override {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
diff --git a/src/codegen/mips64/macro-assembler-mips64.h b/src/codegen/mips64/macro-assembler-mips64.h
index e9b35638d17a..0e32366977fa 100644
--- a/src/codegen/mips64/macro-assembler-mips64.h
+++ b/src/codegen/mips64/macro-assembler-mips64.h
@@ -250,7 +250,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
-  void JumpCodeObject(Register code_object) override {
+  void JumpCodeObject(Register code_object,
+                      JumpMode jump_mode = JumpMode::kJump) override {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
diff --git a/src/wasm/baseline/mips/liftoff-assembler-mips.h b/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 6e19a8e41fdd..332e6afc6a66 100644
--- a/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -403,16 +403,19 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
   }
 }
 
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
-                                        int size) {
-  DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
   lw(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+                                        int32_t offset, int size) {
+  DCHECK_LE(0, offset);
   switch (size) {
     case 1:
-      lb(dst, MemOperand(dst, offset));
+      lb(dst, MemOperand(instance, offset));
       break;
     case 4:
-      lw(dst, MemOperand(dst, offset));
+      lw(dst, MemOperand(instance, offset));
       break;
     default:
       UNIMPLEMENTED();
@@ -420,8 +423,10 @@ void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
 }
 
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+                                                     Register instance,
                                                      int32_t offset) {
-  LoadFromInstance(dst, offset, kTaggedSize);
+  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+  lw(dst, MemOperand(instance, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 848adeb3d9bd..b5cdeb7b0d0c 100644
--- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -385,19 +385,22 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
   }
 }
 
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
-                                        int size) {
-  DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
   Ld(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+                                        int offset, int size) {
+  DCHECK_LE(0, offset);
   switch (size) {
     case 1:
-      Lb(dst, MemOperand(dst, offset));
+      Lb(dst, MemOperand(instance, offset));
       break;
     case 4:
-      Lw(dst, MemOperand(dst, offset));
+      Lw(dst, MemOperand(instance, offset));
       break;
     case 8:
-      Ld(dst, MemOperand(dst, offset));
+      Ld(dst, MemOperand(instance, offset));
       break;
     default:
       UNIMPLEMENTED();
@@ -405,8 +408,10 @@ void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
 }
 
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+                                                     Register instance,
                                                      int32_t offset) {
-  LoadFromInstance(dst, offset, kTaggedSize);
+  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+  Ld(dst, MemOperand(instance, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
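
Note on the subject line: "use return to jump to optimized code" means the
tail call into the optimized Code object is made by returning to it rather
than by a plain indirect jump, so the CPU's return stack buffer (RSB) stays
paired with the call that entered the trampoline; a jump would leave a stale
RSB entry and cost a mispredict on the next real return. The JumpCodeObject
declarations above only gain the JumpMode parameter and remain
UNIMPLEMENTED() on MIPS, so the following is a minimal hypothetical sketch,
not code from this patch, of what a mips64 kPushAndReturn path could look
like, assuming the usual TurboAssembler helpers (Daddu, mov, Jump, Ret) and
the Code object layout of this era:

  // Hypothetical sketch only; this patch leaves the MIPS bodies
  // UNIMPLEMENTED().
  void TurboAssembler::JumpCodeObject(Register code_object,
                                      JumpMode jump_mode) {
    // Advance past the Code object header to the first instruction.
    Daddu(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
    switch (jump_mode) {
      case JumpMode::kJump:
        // Plain indirect jump: the call that entered the trampoline is
        // never matched by a return, so the RSB goes out of balance.
        Jump(code_object);
        return;
      case JumpMode::kPushAndReturn:
        // "Return" into the target instead: jr ra is what the hardware
        // return predictor pairs with the earlier call.
        mov(ra, code_object);
        Ret();
        return;
    }
  }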
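
On "cache the instance in a register": the old LoadFromInstance re-read the
instance pointer from its frame slot and clobbered dst on every access.
Splitting out LoadInstanceFromFrame and passing the instance register
explicitly lets the platform-independent Liftoff code load the instance once
and route any number of field loads through the cached register. A rough
caller-side illustration follows; the register choices and field offsets are
illustrative only, just the split API itself comes from the patch:

  // Hypothetical usage sketch inside LiftoffAssembler code generation.
  Register instance = kScratchReg;
  LoadInstanceFromFrame(instance);  // one read of the frame slot...
  // ...then each instance field access reuses the cached register,
  // where the old API reloaded the frame slot and clobbered dst:
  LoadFromInstance(a0, instance,
                   WasmInstanceObject::kMemoryStartOffset - kHeapObjectTag,
                   kSystemPointerSize);
  LoadFromInstance(a1, instance,
                   WasmInstanceObject::kMemorySizeOffset - kHeapObjectTag,
                   kSystemPointerSize);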