On 22.08.19 10:45, Anup Patel wrote:
We get an illegal instruction trap whenever a Guest/VM executes the WFI
instruction.
This patch handles the WFI trap by blocking the trapped VCPU using the
kvm_vcpu_block() API. The blocked VCPU will be automatically resumed
whenever a VCPU interrupt is injected from user-space or from in-kernel
IRQCHIP emulation.
Signed-off-by: Anup Patel <anup.patel@xxxxxxx>
Acked-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
arch/riscv/kvm/vcpu_exit.c | 88 ++++++++++++++++++++++++++++++++++++++
1 file changed, 88 insertions(+)
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index efc06198c259..fbc04fe335ad 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -12,6 +12,9 @@
#include <linux/kvm_host.h>
#include <asm/csr.h>
+#define INSN_MASK_WFI 0xffffff00
+#define INSN_MATCH_WFI 0x10500000
+
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
#define INSN_MATCH_LH 0x1003
@@ -179,6 +182,87 @@ static ulong get_insn(struct kvm_vcpu *vcpu)
return val;
}
+typedef int (*illegal_insn_func)(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ ulong insn);
+
+static int truly_illegal_insn(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ ulong insn)
+{
+ /* TODO: Redirect trap to Guest VCPU */
+ return -ENOTSUPP;
+}
+
+static int system_opcode_insn(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ ulong insn)
+{
+ if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
+ vcpu->stat.wfi_exit_stat++;
+ if (!kvm_arch_vcpu_runnable(vcpu)) {
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_block(vcpu);
+ vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ }
+ vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+ return 1;
+ }
+
+ return truly_illegal_insn(vcpu, run, insn);
+}
+
+static illegal_insn_func illegal_insn_table[32] = {
Every time I experimented on PowerPC with indirect tables like this
versus switch() in C, the switch() code won. CPUs are pretty good at
predicting branches; at predicting indirect jumps, however, they are
terrible.
So unless you consider the jump table more readable / maintainable, I
would suggest using a simple switch() statement. It will be faster and
smaller. Something like the sketch at the end of this mail.
Alex
+ truly_illegal_insn, /* 0 */
+ truly_illegal_insn, /* 1 */
+ truly_illegal_insn, /* 2 */
+ truly_illegal_insn, /* 3 */
+ truly_illegal_insn, /* 4 */
+ truly_illegal_insn, /* 5 */
+ truly_illegal_insn, /* 6 */
+ truly_illegal_insn, /* 7 */
+ truly_illegal_insn, /* 8 */
+ truly_illegal_insn, /* 9 */
+ truly_illegal_insn, /* 10 */
+ truly_illegal_insn, /* 11 */
+ truly_illegal_insn, /* 12 */
+ truly_illegal_insn, /* 13 */
+ truly_illegal_insn, /* 14 */
+ truly_illegal_insn, /* 15 */
+ truly_illegal_insn, /* 16 */
+ truly_illegal_insn, /* 17 */
+ truly_illegal_insn, /* 18 */
+ truly_illegal_insn, /* 19 */
+ truly_illegal_insn, /* 20 */
+ truly_illegal_insn, /* 21 */
+ truly_illegal_insn, /* 22 */
+ truly_illegal_insn, /* 23 */
+ truly_illegal_insn, /* 24 */
+ truly_illegal_insn, /* 25 */
+ truly_illegal_insn, /* 26 */
+ truly_illegal_insn, /* 27 */
+ system_opcode_insn, /* 28 */
+ truly_illegal_insn, /* 29 */
+ truly_illegal_insn, /* 30 */
+ truly_illegal_insn /* 31 */
+};
+
+static int illegal_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long stval)
+{
+ ulong insn = stval;
+
+ if (unlikely((insn & 3) != 3)) {
+ if (insn == 0)
+ insn = get_insn(vcpu);
+ if ((insn & 3) != 3)
+ return truly_illegal_insn(vcpu, run, insn);
+ }
+
+ return illegal_insn_table[(insn & 0x7c) >> 2](vcpu, run, insn);
+}
+
static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr)
{
@@ -439,6 +523,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
ret = -EFAULT;
run->exit_reason = KVM_EXIT_UNKNOWN;
switch (scause) {
+ case EXC_INST_ILLEGAL:
+ if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+ ret = illegal_inst_fault(vcpu, run, stval);
+ break;
case EXC_INST_PAGE_FAULT:
case EXC_LOAD_PAGE_FAULT:
case EXC_STORE_PAGE_FAULT:
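To illustrate what I mean, here is a rough, untested sketch of the
switch() variant of illegal_inst_fault(), reusing the helpers from your
patch (get_insn(), system_opcode_insn(), truly_illegal_insn()); take it
as a suggestion only, not as something I have compiled or run:

static int illegal_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long stval)
{
	ulong insn = stval;

	/* Non-compressed instructions have both low bits set */
	if (unlikely((insn & 3) != 3)) {
		if (insn == 0)
			insn = get_insn(vcpu);
		if ((insn & 3) != 3)
			return truly_illegal_insn(vcpu, run, insn);
	}

	/* Dispatch on the major opcode (instruction bits 6:2) */
	switch ((insn & 0x7c) >> 2) {
	case 28: /* SYSTEM opcode, covers WFI */
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

That keeps the dispatch in one place, drops the 32-entry table, and lets
the compiler turn it into a compare-and-branch that the CPU can predict.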