Hi, > > Avoid this fault by calling aarch64_insn_write() to update the instruction > > if the module text has already been marked read-only. Preserve the current > > behaviour if called before this has been done. > > > > Signed-off-by: Suraj Jitindar Singh <surajjs@xxxxxxxxxx> > > --- > > arch/arm64/kernel/module.c | 81 ++++++++++++++++++++++---------------- > > 1 file changed, 47 insertions(+), 34 deletions(-) > > > > diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c > > index b5ec010c481f..35596ea870ab 100644 > > --- a/arch/arm64/kernel/module.c > > +++ b/arch/arm64/kernel/module.c > > @@ -19,6 +19,7 @@ > > #include <asm/alternative.h> > > #include <asm/insn.h> > > #include <asm/sections.h> > > +#include <asm/patching.h> > > > > void *module_alloc(unsigned long size) > > { > > @@ -155,7 +156,8 @@ enum aarch64_insn_movw_imm_type { > > }; > > > > static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val, > > - int lsb, enum aarch64_insn_movw_imm_type imm_type) > > + int lsb, enum aarch64_insn_movw_imm_type imm_type, > > + bool early) > > { > > u64 imm; > > s64 sval; > > @@ -187,7 +189,10 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val, > > > > /* Update the instruction with the new encoding. */ > > insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); > > - *place = cpu_to_le32(insn); > > + if (early) > > + *place = cpu_to_le32(insn); > > + else > > + aarch64_insn_write(place, insn); > > If we really need this, I think it'd be better to refactor the > reloc_insn_*() helpers to generate the new encoding into a temporary > buffer, and make it the caller's responsibility to then perform the > write to the real location in the module. > > That way we only have to handle the "early" distinction in one place > rather than spreading it out. > > I see you haven't altered the reloc_data() function -- does livepatch > never result in data relocations? > > Thanks, > Mark. 
> Livepatch can also result in data relocations. So I wrote a data relocation test with kpatch and sent a pull request [1]. Suraj, please try it if you would like. [1] https://github.com/dynup/kpatch/pull/1244 Thanks, Keiya > > > > if (imm > U16_MAX) > > return -ERANGE; > > @@ -196,7 +201,8 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val, > > } > > > > static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, > > - int lsb, int len, enum aarch64_insn_imm_type imm_type) > > + int lsb, int len, enum aarch64_insn_imm_type imm_type, > > + bool early) > > { > > u64 imm, imm_mask; > > s64 sval; > > @@ -212,7 +218,10 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, > > > > /* Update the instruction's immediate field. */ > > insn = aarch64_insn_encode_immediate(imm_type, insn, imm); > > - *place = cpu_to_le32(insn); > > + if (early) > > + *place = cpu_to_le32(insn); > > + else > > + aarch64_insn_write(place, insn); > > > > /* > > * Extract the upper value bits (including the sign bit) and > > @@ -231,17 +240,17 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, > > } > > > > static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs, > > - __le32 *place, u64 val) > > + __le32 *place, u64 val, bool early) > > { > > u32 insn; > > > > if (!is_forbidden_offset_for_adrp(place)) > > return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21, > > - AARCH64_INSN_IMM_ADR); > > + AARCH64_INSN_IMM_ADR, early); > > > > /* patch ADRP to ADR if it is in range */ > > if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21, > > - AARCH64_INSN_IMM_ADR)) { > > + AARCH64_INSN_IMM_ADR, early)) { > > insn = le32_to_cpu(*place); > > insn &= ~BIT(31); > > } else { > > @@ -253,7 +262,10 @@ static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs, > > AARCH64_INSN_BRANCH_NOLINK); > > } > > > > - *place = cpu_to_le32(insn); > > 
+ else > > + aarch64_insn_write(place, insn); > > return 0; > > } > > > > @@ -270,6 +282,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, > > void *loc; > > u64 val; > > Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; > > + bool early = me->state == MODULE_STATE_UNFORMED; > > > > for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { > > /* loc corresponds to P in the AArch64 ELF document. */ > > @@ -322,88 +335,88 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, > > fallthrough; > > case R_AARCH64_MOVW_UABS_G0: > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_UABS_G1_NC: > > overflow_check = false; > > fallthrough; > > case R_AARCH64_MOVW_UABS_G1: > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_UABS_G2_NC: > > overflow_check = false; > > fallthrough; > > case R_AARCH64_MOVW_UABS_G2: > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_UABS_G3: > > /* We're using the top bits so we can't overflow. 
*/ > > overflow_check = false; > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_SABS_G0: > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > case R_AARCH64_MOVW_SABS_G1: > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > case R_AARCH64_MOVW_SABS_G2: > > ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G0_NC: > > overflow_check = false; > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G0: > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G1_NC: > > overflow_check = false; > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G1: > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G2_NC: > > overflow_check = false; > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, > > - AARCH64_INSN_IMM_MOVKZ); > > + AARCH64_INSN_IMM_MOVKZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G2: > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > case R_AARCH64_MOVW_PREL_G3: > > /* We're using the top bits so we can't overflow. 
*/ > > overflow_check = false; > > ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, > > - AARCH64_INSN_IMM_MOVNZ); > > + AARCH64_INSN_IMM_MOVNZ, early); > > break; > > > > /* Immediate instruction relocations. */ > > case R_AARCH64_LD_PREL_LO19: > > ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, > > - AARCH64_INSN_IMM_19); > > + AARCH64_INSN_IMM_19, early); > > break; > > case R_AARCH64_ADR_PREL_LO21: > > ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, > > - AARCH64_INSN_IMM_ADR); > > + AARCH64_INSN_IMM_ADR, early); > > break; > > case R_AARCH64_ADR_PREL_PG_HI21_NC: > > overflow_check = false; > > fallthrough; > > case R_AARCH64_ADR_PREL_PG_HI21: > > - ovf = reloc_insn_adrp(me, sechdrs, loc, val); > > + ovf = reloc_insn_adrp(me, sechdrs, loc, val, early); > > if (ovf && ovf != -ERANGE) > > return ovf; > > break; > > @@ -411,40 +424,40 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, > > case R_AARCH64_LDST8_ABS_LO12_NC: > > overflow_check = false; > > ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, > > - AARCH64_INSN_IMM_12); > > + AARCH64_INSN_IMM_12, early); > > break; > > case R_AARCH64_LDST16_ABS_LO12_NC: > > overflow_check = false; > > ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, > > - AARCH64_INSN_IMM_12); > > + AARCH64_INSN_IMM_12, early); > > break; > > case R_AARCH64_LDST32_ABS_LO12_NC: > > overflow_check = false; > > ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, > > - AARCH64_INSN_IMM_12); > > + AARCH64_INSN_IMM_12, early); > > break; > > case R_AARCH64_LDST64_ABS_LO12_NC: > > overflow_check = false; > > ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, > > - AARCH64_INSN_IMM_12); > > + AARCH64_INSN_IMM_12, early); > > break; > > case R_AARCH64_LDST128_ABS_LO12_NC: > > overflow_check = false; > > ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, > > - AARCH64_INSN_IMM_12); > > + AARCH64_INSN_IMM_12, early); > > break; > > case R_AARCH64_TSTBR14: > > ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, > > - 
AARCH64_INSN_IMM_14); > > + AARCH64_INSN_IMM_14, early); > > break; > > case R_AARCH64_CONDBR19: > > ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, > > - AARCH64_INSN_IMM_19); > > + AARCH64_INSN_IMM_19, early); > > break; > > case R_AARCH64_JUMP26: > > case R_AARCH64_CALL26: > > ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, > > - AARCH64_INSN_IMM_26); > > + AARCH64_INSN_IMM_26, early); > > > > if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && > > ovf == -ERANGE) { > > @@ -452,7 +465,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, > > if (!val) > > return -ENOEXEC; > > ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, > > - 26, AARCH64_INSN_IMM_26); > > + 26, AARCH64_INSN_IMM_26, early); > > } > > break; > > > > -- > > 2.17.1 > >