TLBI ALLE1* is a pretty big hammer that invalidates all S1/S2 TLBs.
This translates into the unmapping of all our shadow S2 PTs, itself
resulting in the corresponding TLB invalidations.

Co-developed-by: Jintack Lim <jintack.lim@xxxxxxxxxx>
Co-developed-by: Christoffer Dall <christoffer.dall@xxxxxxx>
Signed-off-by: Jintack Lim <jintack.lim@xxxxxxxxxx>
Signed-off-by: Christoffer Dall <christoffer.dall@xxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kvm/sys_regs.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 22a3691ce248..d8d6380b7c66 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2757,6 +2757,29 @@ static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr)
 	return true;
 }
 
+static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
+	write_lock(&vcpu->kvm->mmu_lock);
+
+	/*
+	 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
+	 * corresponding VMIDs.
+	 */
+	kvm_nested_s2_unmap(vcpu->kvm);
+
+	write_unlock(&vcpu->kvm->mmu_lock);
+
+	return true;
+}
+
 /* Only defined here as this is an internal "abstraction" */
 union tlbi_info {
 	struct {
@@ -2880,7 +2903,9 @@ static struct sys_reg_desc sys_insn_descs[] = {
 	SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
 	SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
 
+	SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
 	SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
+	SYS_INSN(TLBI_ALLE1, handle_alle1is),
 	SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
 };

--
2.39.2
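
[Note: for context, the guest-hypervisor side of this is just the
architectural TLB maintenance sequence issued at (virtual) EL2; with
FEAT_NV, the EL2-only TLBI traps to the host's EL2 and ends up routed to
handle_alle1is(). The sketch below is purely illustrative and not part of
the patch: guest_hyp_flush_all_s12_tlbs() is a hypothetical helper name,
and GCC-style inline assembly is assumed.

static inline void guest_hyp_flush_all_s12_tlbs(void)
{
	/*
	 * Invalidate all S1/S2 TLB entries for the EL1&0 regime, for
	 * all VMIDs, in the Inner Shareable domain. This is an
	 * EL2-only instruction, hence the trap when executed from
	 * virtual EL2 under NV.
	 */
	asm volatile("tlbi	alle1is" : : : "memory");
	asm volatile("dsb	ish" : : : "memory");	/* complete the invalidation */
	asm volatile("isb" : : : "memory");		/* resynchronize the PE */
}

Rather than forwarding the big hammer to the hardware, the handler drops
all shadow stage-2 tables; per the comment in the patch, the unmap path
then emits the S1/S2 TLBIs for each affected VMID as a side effect.]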