In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related functions still retain
both the 'kvm_run' and 'kvm_vcpu' parameters.
This patch does a unified cleanup of these remaining redundant parameters.
Signed-off-by: Tianjia Zhang <tianjia.zhang@xxxxxxxxxxxxxxxxx>
---
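Not part of the changelog, just to illustrate the calling convention this
cleanup relies on: a minimal, stand-alone sketch (simplified stand-in structs
and an illustrative handle_exit(), not the kernel's definitions) of how a
callee that used to take both pointers now derives 'run' from the vcpu:

	#include <stdio.h>

	struct kvm_run { int exit_reason; };

	struct kvm_vcpu {
		struct kvm_run *run;	/* kvm_run hangs off kvm_vcpu */
	};

	/* old shape: int handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) */
	static int handle_exit(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;	/* recover run from the vcpu */

		return run->exit_reason;
	}

	int main(void)
	{
		struct kvm_run run = { .exit_reason = 0 };
		struct kvm_vcpu vcpu = { .run = &run };

		printf("exit_reason = %d\n", handle_exit(&vcpu));
		return 0;
	}
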
arch/mips/include/asm/kvm_host.h | 4 ++--
arch/mips/kvm/entry.c | 15 +++++----------
arch/mips/kvm/mips.c | 3 ++-
arch/mips/kvm/trap_emul.c | 2 +-
arch/mips/kvm/vz.c | 2 +-
5 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *guest_ebase;
- int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..e3f29af3b6cd 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int reg)
* Assemble the start of the vcpu_run function to run a guest VCPU. The function
* conforms to the following prototype:
*
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
*
* The exit from the guest and return to the caller is handled by the code
* generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
/*
- * A0: run
- * A1: vcpu
+ * A0: vcpu
*/
/* k0/k1 not being used in host kernel context */
@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
kvm_mips_build_save_scratch(&p, V1, K1);
/* VCPU scratch register has pointer to vcpu */
- UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
/* Offset into vcpu->arch */
- UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+ UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
/*
* Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
/* Restore vcpu */
- UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
- /* Restore run (vcpu->run) */
- UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
+ UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
/*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
* with this in the kernel
*/
uasm_i_move(&p, A0, S0);
- uasm_i_move(&p, A1, S1);
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, RA, T9);
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);