On Fri, Oct 31, 2014 at 05:26:36PM +0000, Andre Przywara wrote:
> The virtual MPIDR registers (containing topology information) for the
> guest are currently mapped linearly to the vcpu_id. Improve this
> mapping for arm64 by using three levels to not artificially limit the
> number of vCPUs. Also add an accessor to later allow easier access to
> a vCPU with a given MPIDR.
> Use this new accessor in the PSCI emulation.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
> arch/arm/include/asm/kvm_emulate.h   |  3 ++-
> arch/arm/include/asm/kvm_host.h      |  2 ++
> arch/arm/kvm/arm.c                   | 15 +++++++++++++++
> arch/arm/kvm/psci.c                  | 15 ++++-----------
> arch/arm64/include/asm/kvm_emulate.h |  3 ++-
> arch/arm64/include/asm/kvm_host.h    |  2 ++
> arch/arm64/kvm/sys_regs.c            | 11 +++++++++--
> 7 files changed, 36 insertions(+), 15 deletions(-)
>
> diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
> index b9db269..bd54383 100644
> --- a/arch/arm/include/asm/kvm_emulate.h
> +++ b/arch/arm/include/asm/kvm_emulate.h
> @@ -23,6 +23,7 @@
> #include <asm/kvm_asm.h>
> #include <asm/kvm_mmio.h>
> #include <asm/kvm_arm.h>
> +#include <asm/cputype.h>
>
> unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
> unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
> @@ -164,7 +165,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
>
> static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
> {
> -        return vcpu->arch.cp15[c0_MPIDR];
> +        return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
> }

Continuing the discussion from the previous version: yes, please don't
call it get_mpidr() if it returns a masked-off version of the MPIDR;
call it get_mpidr_hwid() or something instead.

>
> static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
> index 53036e2..b443dfe 100644
> --- a/arch/arm/include/asm/kvm_host.h
> +++ b/arch/arm/include/asm/kvm_host.h
> @@ -236,6 +236,8 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
> int kvm_perf_init(void);
> int kvm_perf_teardown(void);
>
> +struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
> +
> static inline void kvm_arch_hardware_disable(void) {}
> static inline void kvm_arch_hardware_unsetup(void) {}
> static inline void kvm_arch_sync_events(struct kvm *kvm) {}
> diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
> index 9e193c8..61f13cc 100644
> --- a/arch/arm/kvm/arm.c
> +++ b/arch/arm/kvm/arm.c
> @@ -977,6 +977,21 @@ static void check_kvm_target_cpu(void *ret)
>         *(int *)ret = kvm_target_cpu();
> }
>
> +struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
> +{
> +        unsigned long c_mpidr;
> +        struct kvm_vcpu *vcpu;
> +        int i;
> +
> +        mpidr &= MPIDR_HWID_BITMASK;
> +        kvm_for_each_vcpu(i, vcpu, kvm) {
> +                c_mpidr = kvm_vcpu_get_mpidr(vcpu);
> +                if (c_mpidr == mpidr)
> +                        return vcpu;

Why do you need the c_mpidr variable at all?
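For illustration, a minimal sketch of the same lookup without the
temporary (assuming kvm_vcpu_get_mpidr() keeps returning the masked
value, as it does in this patch):

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
        struct kvm_vcpu *vcpu;
        int i;

        /* Compare each vCPU's masked MPIDR directly; no temporary needed. */
        mpidr &= MPIDR_HWID_BITMASK;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (kvm_vcpu_get_mpidr(vcpu) == mpidr)
                        return vcpu;
        }
        return NULL;
}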
> +        }
> +        return NULL;
> +}
> +
> /**
>  * Initialize Hyp-mode and memory mappings on all CPUs.
>  */
> diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
> index 09cf377..49f0992 100644
> --- a/arch/arm/kvm/psci.c
> +++ b/arch/arm/kvm/psci.c
> @@ -21,6 +21,7 @@
> #include <asm/cputype.h>
> #include <asm/kvm_emulate.h>
> #include <asm/kvm_psci.h>
> +#include <asm/kvm_host.h>
>
> /*
>  * This is an implementation of the Power State Coordination Interface
> @@ -65,25 +66,17 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
> static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> {
>         struct kvm *kvm = source_vcpu->kvm;
> -        struct kvm_vcpu *vcpu = NULL, *tmp;
> +        struct kvm_vcpu *vcpu = NULL;
>         wait_queue_head_t *wq;
>         unsigned long cpu_id;
>         unsigned long context_id;
> -        unsigned long mpidr;
>         phys_addr_t target_pc;
> -        int i;
>
> -        cpu_id = *vcpu_reg(source_vcpu, 1);
> +        cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
>         if (vcpu_mode_is_32bit(source_vcpu))
>                 cpu_id &= ~((u32) 0);
>
> -        kvm_for_each_vcpu(i, tmp, kvm) {
> -                mpidr = kvm_vcpu_get_mpidr(tmp);
> -                if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
> -                        vcpu = tmp;
> -                        break;
> -                }
> -        }
> +        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
>
>         /*
>          * Make sure the caller requested a valid CPU and that the CPU is
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index 5674a55..37316dd 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -27,6 +27,7 @@
> #include <asm/kvm_arm.h>
> #include <asm/kvm_mmio.h>
> #include <asm/ptrace.h>
> +#include <asm/cputype.h>
>
> unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
> unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
> @@ -184,7 +185,7 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
>
> static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
> {
> -        return vcpu_sys_reg(vcpu, MPIDR_EL1);
> +        return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
> }
>
> static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 2012c4b..286bb61 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -207,6 +207,8 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
> int kvm_perf_init(void);
> int kvm_perf_teardown(void);
>
> +struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
> +
> static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
>                                        phys_addr_t pgd_ptr,
>                                        unsigned long hyp_stack_ptr,
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 4cc3b71..dcc5867 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -252,10 +252,17 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>
> static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> {
> +        u64 mpidr;
> +
>         /*
> -         * Simply map the vcpu_id into the Aff0 field of the MPIDR.
> +         * Map the vcpu_id into the first three Aff fields of the MPIDR.
> +         * Aff0 uses only 16 CPUs, since there is a SGI injection
> +         * limitation of GICv3.

This last sentence is worded weirdly, so I suggested an alternative
version in my last review, which you missed/ignored. Please address it.

Thanks,
-Christoffer
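As an aside, the reset_mpidr() hunk above is cut off before the actual
mapping code. A rough sketch of the kind of three-level vcpu_id-to-affinity
mapping the commit message describes (illustrative only, not necessarily
the patch's exact code; it assumes the MPIDR_LEVEL_SHIFT() helper from
arm64's <asm/cputype.h>) could look like this:

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 mpidr;

        /*
         * Pack vcpu_id into Aff0/Aff1/Aff2, keeping at most 16 CPUs in
         * Aff0 because the GICv3 SGI registers can only target 16 CPUs
         * within one affinity-0 group.
         */
        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);

        /* Bit 31 is RES1 in MPIDR_EL1. */
        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}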