Skip to content

Commit

Permalink
[mips][sparkplug] Use return to jump to optimized
Browse files Browse the repository at this point in the history
code to keep the RSB balanced

Besides, extract the common code into
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot,
and cache the instance in a register.

Port: af3c530
Port: 89ea44b
Port: adf035f
Change-Id: I3fde5b0995ea8aa51faeb3fd743cebef748ba745
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2710212
Auto-Submit: Liu yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/master@{#72884}
  • Loading branch information
LiuYu396 authored and Commit Bot committed Feb 22, 2021
1 parent b567875 commit 2d3d747
Show file tree
Hide file tree
Showing 6 changed files with 75 additions and 54 deletions.
44 changes: 24 additions & 20 deletions src/builtins/mips/builtins-mips.cc
Original file line number Diff line number Diff line change
Expand Up @@ -957,6 +957,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}

// Given the packed |optimization_state| of |feedback_vector|, either requests
// optimization via the decoded optimization marker, or tail-calls the
// already-cached optimized code slot. t1 and t3 are used as scratch registers
// (t1 for the bit test, both passed to TailCallOptimizedCodeSlot).
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  Label maybe_has_optimized_code;
  // Check if optimized code marker is available: if none of the
  // "compile optimized" marker bits are set, the slot may instead hold
  // already-compiled optimized code.
  __ andi(t1, optimization_state,
          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
  __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));

  // A marker is present: decode it in place and act on it.
  // NOTE(review): assumes MaybeOptimizeCode does not fall through here --
  // confirm (the pre-refactor caller jumped away explicitly afterwards).
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

  __ bind(&maybe_has_optimized_code);
  // No marker: load the cached optimized code from the feedback vector and
  // tail-call it. |optimized_code_entry| aliases |optimization_state|, whose
  // previous value is dead here; load into the alias that names what the
  // register now holds (the original loaded into |optimization_marker|,
  // which is the same register but the wrong name for this value).
  Register optimized_code_entry = optimization_state;
  __ Lw(optimized_code_entry,
        FieldMemOperand(feedback_vector,
                        FeedbackVector::kMaybeOptimizedCodeOffset));

  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
}

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
Expand Down Expand Up @@ -1160,26 +1182,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);

__ bind(&has_optimized_code_or_marker);

Label maybe_has_optimized_code;
// Check if optimized code marker is available
__ andi(t1, optimization_state,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
__ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));

Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);

__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ Lw(optimization_marker,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));

TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);

__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
Expand Down
43 changes: 24 additions & 19 deletions src/builtins/mips64/builtins-mips64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -974,6 +974,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}

// Given the packed |optimization_state| of |feedback_vector|, either requests
// optimization via the decoded optimization marker, or tail-calls the
// already-cached optimized code slot. t0 is used as scratch for the bit test;
// t3 and a5 are passed to TailCallOptimizedCodeSlot as scratch.
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  Label maybe_has_optimized_code;
  // Check if optimized code marker is available: if none of the
  // "compile optimized" marker bits are set, the slot may instead hold
  // already-compiled optimized code.
  __ andi(t0, optimization_state,
          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
  __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));

  // A marker is present: decode it in place and act on it.
  // NOTE(review): assumes MaybeOptimizeCode does not fall through here --
  // confirm (the pre-refactor caller jumped away explicitly afterwards).
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

  __ bind(&maybe_has_optimized_code);
  // No marker: load the cached optimized code from the feedback vector and
  // tail-call it. |optimized_code_entry| aliases |optimization_state|, whose
  // previous value is dead here; load into the alias that names what the
  // register now holds (the original loaded into |optimization_marker|,
  // which is the same register but the wrong name for this value).
  Register optimized_code_entry = optimization_state;
  __ Ld(optimized_code_entry,
        FieldMemOperand(feedback_vector,
                        FeedbackVector::kMaybeOptimizedCodeOffset));

  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
}

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
Expand Down Expand Up @@ -1178,25 +1200,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);

__ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Check if optimized code marker is available
__ andi(t0, optimization_state,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
__ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));

Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);

__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ Ld(optimization_marker,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));

TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);

__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
Expand Down
3 changes: 2 additions & 1 deletion src/codegen/mips/macro-assembler-mips.h
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
void JumpCodeObject(Register code_object) override {
// Stub: not yet implemented for this architecture; hits UNIMPLEMENTED() if
// reached. |jump_mode| exists only to match the overridden base signature.
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
Expand Down
3 changes: 2 additions & 1 deletion src/codegen/mips64/macro-assembler-mips64.h
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
void JumpCodeObject(Register code_object) override {
// Stub: not yet implemented for this architecture; hits UNIMPLEMENTED() if
// reached. |jump_mode| exists only to match the overridden base signature.
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
Expand Down
17 changes: 11 additions & 6 deletions src/wasm/baseline/mips/liftoff-assembler-mips.h
Original file line number Diff line number Diff line change
Expand Up @@ -403,25 +403,30 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}

void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
DCHECK_LE(0, offset);
// Reloads the cached instance pointer from its dedicated frame slot
// (liftoff::GetInstanceOperand()) into |dst|.
void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}

// Loads |size| bytes from |offset| within the instance object into |dst|.
// Only byte (1) and word (4) sized loads are supported on mips32.
void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
                                        int32_t offset, int size) {
  DCHECK_LE(0, offset);
  const MemOperand src{instance, offset};
  if (size == 1) {
    lb(dst, src);
  } else if (size == 4) {
    lw(dst, src);
  } else {
    UNIMPLEMENTED();
  }
}

// Loads a tagged-pointer field of the instance object into |dst|. On mips32
// a tagged value is a full system pointer, so a plain word load suffices.
// Fix: drop the stale `LoadFromInstance(dst, offset, kTaggedSize);` line left
// over from the old 3-argument signature -- it no longer matches the new
// 4-argument LoadFromInstance and performed a redundant duplicate load.
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                     Register instance,
                                                     int32_t offset) {
  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
  lw(dst, MemOperand(instance, offset));
}

void LiftoffAssembler::SpillInstance(Register instance) {
Expand Down
19 changes: 12 additions & 7 deletions src/wasm/baseline/mips64/liftoff-assembler-mips64.h
Original file line number Diff line number Diff line change
Expand Up @@ -385,28 +385,33 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}

void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
DCHECK_LE(0, offset);
// Reloads the cached instance pointer from its dedicated frame slot
// (liftoff::GetInstanceOperand()) into |dst|.
void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
}

// Loads |size| bytes from |offset| within the instance object into |dst|.
// Byte (1), word (4) and doubleword (8) sized loads are supported on mips64.
void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
                                        int offset, int size) {
  DCHECK_LE(0, offset);
  const MemOperand src{instance, offset};
  if (size == 1) {
    Lb(dst, src);
  } else if (size == 4) {
    Lw(dst, src);
  } else if (size == 8) {
    Ld(dst, src);
  } else {
    UNIMPLEMENTED();
  }
}

// Loads a tagged-pointer field of the instance object into |dst|. On mips64
// a tagged value is a full system pointer, so a plain doubleword load
// suffices.
// Fix: drop the stale `LoadFromInstance(dst, offset, kTaggedSize);` line left
// over from the old 3-argument signature -- it no longer matches the new
// 4-argument LoadFromInstance and performed a redundant duplicate load.
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
                                                     Register instance,
                                                     int32_t offset) {
  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
  Ld(dst, MemOperand(instance, offset));
}

void LiftoffAssembler::SpillInstance(Register instance) {
Expand Down

0 comments on commit 2d3d747

Please sign in to comment.