From 44eebf92a4b3b129fd9766105efe68bad32052fd Mon Sep 17 00:00:00 2001 From: Dylan Hatch Date: Fri, 17 Jan 2025 20:28:11 +0000 Subject: [PATCH] create-diff-object: Add support for arm64 DYNAMIC_FTRACE_WITH_CALL_OPS For arm64 this option uses -fpatchable-function-entry=M,2, so 2 NOPs are placed before the function entry point (in order to store a pointer to ftrace_ops). When calculating function padding, check for the presence of the two NOPs, and adjust the padding size by 8 if they are found. This was merged in the upstream kernel in v6.8 with: baaf553d3bc3 ("arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS") Taking this into account, the entry of a function can look like one of: 1. Without DYNAMIC_FTRACE_WITH_CALL_OPS and CONFIG_ARM64_BTI_KERNEL ------------------------------------------------------------------- Disassembly of section .text.cmdline_proc_show: 0000000000000008 <cmdline_proc_show>: 8: d503201f nop c: d503201f nop 2. Without DYNAMIC_FTRACE_WITH_CALL_OPS and with CONFIG_ARM64_BTI_KERNEL ------------------------------------------------------------------------ Disassembly of section .text.cmdline_proc_show: 0000000000000008 <cmdline_proc_show>: 0: d503245f bti c 4: d503201f nop 8: d503201f nop 3. With DYNAMIC_FTRACE_WITH_CALL_OPS and without CONFIG_ARM64_BTI_KERNEL ------------------------------------------------------------------------ Disassembly of section .text.cmdline_proc_show: 0000000000000000 <cmdline_proc_show-0x8>: 0: d503201f nop 4: d503201f nop 0000000000000008 <cmdline_proc_show>: 8: d503201f nop c: d503201f nop 4. With DYNAMIC_FTRACE_WITH_CALL_OPS and with CONFIG_ARM64_BTI_KERNEL --------------------------------------------------------------------- Disassembly of section .text.cmdline_proc_show: 0000000000000000 <cmdline_proc_show-0x8>: 0: d503201f nop 4: d503201f nop 0000000000000008 <cmdline_proc_show>: 8: d503245f bti c c: d503201f nop 10: d503201f nop Make create-diff-object aware of DYNAMIC_FTRACE_WITH_CALL_OPS and its quirks. 
Signed-off-by: Dylan Hatch Signed-off-by: Puranjay Mohan --- kpatch-build/create-diff-object.c | 38 +++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 99879321..bb3cd7a5 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -255,6 +255,37 @@ static bool kpatch_is_mapping_symbol(struct kpatch_elf *kelf, struct symbol *sym return false; } +static unsigned int function_padding_size(struct kpatch_elf *kelf, struct symbol *sym) +{ + unsigned int size = 0; + + switch (kelf->arch) { + case AARCH64: + { + uint32_t *insn = sym->sec->data->d_buf; + unsigned int i; + void *insn_end = sym->sec->data->d_buf + sym->sym.st_value; + + /* + * If the arm64 kernel is compiled with CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + * then there are two NOPs before the function and a `BTI C` + 2 NOPs at the + * start of the function. Verify the presence of the two NOPs before the + * function entry. + */ + for (i = 0; (void *)insn < insn_end && *insn == 0xd503201f; i++, insn++) + ; + + if (i == 2) + size = 8; + break; + } + default: + break; + } + + return size; +} + /* * When compiling with -ffunction-sections and -fdata-sections, almost every * symbol gets its own dedicated section. We call such symbols "bundled" @@ -271,6 +302,8 @@ static void kpatch_bundle_symbols(struct kpatch_elf *kelf) expected_offset = sym->pfx->sym.st_size; else if (is_gcc6_localentry_bundled_sym(kelf, sym)) expected_offset = 8; + else if (sym->type == STT_FUNC) + expected_offset = function_padding_size(kelf, sym); else expected_offset = 0; @@ -3808,6 +3841,11 @@ static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf, bool unsigned char *insn = sym->sec->data->d_buf; int i; + /* + * Skip the padding NOPs added by CALL_OPS. 
+ */ + insn += function_padding_size(kelf, sym); + /* * If BTI (Branch Target Identification) is enabled then there * might be an additional 'BTI C' instruction before the two