The arm64 kernel also supports packing of relocation data using the RELR
format. Implement a parser for RELR data and fix up the relocations using
the same infrastructure as RELA relocs.

Signed-off-by: David Brazdil <dbrazdil@xxxxxxxxxx>
---
 arch/arm64/kvm/va_layout.c | 41 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index b80fab974896..7f45a98eacfd 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -145,6 +145,43 @@ static void __fixup_hyp_rela(void)
 		__fixup_hyp_rel(rel[i].r_offset);
 }
 
+#ifdef CONFIG_RELR
+static void __fixup_hyp_relr(void)
+{
+	u64 *rel, *end;
+
+	rel = (u64 *)(kimage_vaddr + __load_elf_u64(__relr_offset));
+	end = rel + (__load_elf_u64(__relr_size) / sizeof(*rel));
+
+	while (rel < end) {
+		unsigned int n;
+		u64 addr = *(rel++);
+
+		/* Address must not have the LSB set. */
+		BUG_ON(addr & BIT(0));
+
+		/* Fix up the first address of the chain. */
+		__fixup_hyp_rel(addr);
+
+		/*
+		 * Loop over bitmaps, i.e. as long as words' LSB is 1.
+		 * Each bit (ordered from LSB to MSB) represents one word from
+		 * the last full address (exclusive). If the corresponding bit
+		 * is 1, there is a relative relocation on that word.
+		 */
+		for (n = 0; rel < end && (*rel & BIT(0)); n++) {
+			unsigned int i;
+			u64 bitmap = *(rel++);
+
+			for (i = 1; i < 64; ++i) {
+				if (bitmap & BIT(i))
+					__fixup_hyp_rel(addr + 8 * (63 * n + i));
+			}
+		}
+	}
+}
+#endif
+
 /*
  * The kernel relocated pointers to kernel VA. Iterate over relocations in
  * the hypervisor ELF sections and convert them to hyp VA. This avoids the
@@ -156,6 +193,10 @@ __init void kvm_fixup_hyp_relocations(void)
 		return;
 
 	__fixup_hyp_rela();
+
+#ifdef CONFIG_RELR
+	__fixup_hyp_relr();
+#endif
 }
 
 static u32 compute_instruction(int n, u32 rd, u32 rn)
-- 
2.29.2.299.gdc1121823c-goog

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
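
For readers unfamiliar with RELR, below is a minimal userspace sketch of the
decoding scheme the patch implements. It is not part of the patch:
apply_fixup() is a hypothetical stand-in for __fixup_hyp_rel(), and the
sample entries are made up. An address entry (LSB clear) is relocated
directly; each following bitmap entry (LSB set) covers the next 63 words.

/*
 * Standalone sketch of RELR decoding, under the assumptions above.
 * Compile with: cc -o relr-demo relr-demo.c
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1ULL << (n))

static void apply_fixup(uint64_t addr)
{
	printf("relocate word at 0x%llx\n", (unsigned long long)addr);
}

int main(void)
{
	/* One address entry followed by one bitmap entry (LSB set). */
	uint64_t relr[] = {
		0x1000,				/* relocate word at 0x1000 */
		BIT(0) | BIT(1) | BIT(3),	/* bitmap: words at 0x1008, 0x1018 */
	};
	uint64_t *rel = relr, *end = relr + sizeof(relr) / sizeof(relr[0]);

	while (rel < end) {
		unsigned int n;
		uint64_t addr = *(rel++);

		/* Address entries must have the LSB clear. */
		assert(!(addr & BIT(0)));

		apply_fixup(addr);

		/* Consume bitmap entries; bitmap n, bit i => word 63*n + i. */
		for (n = 0; rel < end && (*rel & BIT(0)); n++) {
			unsigned int i;
			uint64_t bitmap = *(rel++);

			for (i = 1; i < 64; i++) {
				if (bitmap & BIT(i))
					apply_fixup(addr + 8 * (63 * n + i));
			}
		}
	}
	return 0;
}

Running it prints fixups at 0x1000, 0x1008 and 0x1018, matching the
addr + 8 * (63 * n + i) arithmetic in __fixup_hyp_relr().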