From: Scott Wood <scottwood@xxxxxxxxxxxxx>

Since TLB1 loading doesn't check the shadow TLB before allocating
another entry, you can get duplicates.  Once shadow PIDs are enabled in
a later patch, we won't need to invalidate the TLB on every switch, so
this optimization won't be needed anyway.

Signed-off-by: Scott Wood <scottwood@xxxxxxxxxxxxx>
Signed-off-by: Alexander Graf <agraf@xxxxxxx>
---
 arch/powerpc/kvm/e500_tlb.c |   18 ------------------
 1 files changed, 0 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index b18fe35..e0ab216 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -144,24 +144,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,

 void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	int i;
-	unsigned register mas0;
-
-	/* Load all valid TLB1 entries to reduce guest tlb miss fault */
-	local_irq_disable();
-	mas0 = mfspr(SPRN_MAS0);
-	for (i = 0; i < tlb1_max_shadow_size(); i++) {
-		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
-
-		if (get_tlb_v(stlbe)) {
-			mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
-				| MAS0_ESEL(to_htlb1_esel(i)));
-			__write_host_tlbe(stlbe);
-		}
-	}
-	mtspr(SPRN_MAS0, mas0);
-	local_irq_enable();
 }

 void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
-- 
1.6.0.2
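
To illustrate the duplicate problem the changelog describes: the removed
kvmppc_e500_tlb_load() body rewrote every valid shadow TLB1 entry into
the host TLB on each vcpu load, but the TLB1 allocation path never
scanned shadow_tlb[1][] for an existing mapping before claiming a fresh
slot, so the same guest translation could end up in two host entries.
Below is a minimal sketch of the kind of lookup that allocation path
would need to avoid that.  tlb1_find_shadow_dup() is a hypothetical
helper, not in the tree; shadow_tlb[1][], tlb1_max_shadow_size() and
get_tlb_v() come from the code removed above, while get_tlb_eaddr() and
get_tlb_tid() are assumed to match the accessors in e500_tlb.h of this
era.

/*
 * Hypothetical sketch, not part of this patch: the duplicate check
 * TLB1 shadow allocation would need before claiming a new slot.
 */
static int tlb1_find_shadow_dup(struct kvmppc_vcpu_e500 *vcpu_e500,
				gva_t eaddr, unsigned int tid)
{
	int i;

	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

		/*
		 * Simplified: real TLB1 entries span variable-sized
		 * ranges, so a real check would compare the whole
		 * range, not a single effective address.
		 */
		if (get_tlb_v(stlbe) &&
		    get_tlb_eaddr(stlbe) == eaddr &&
		    get_tlb_tid(stlbe) == tid)
			return i;	/* already shadowed: would duplicate */
	}

	return -1;	/* no existing entry; a new slot is safe */
}

The series takes the other route: drop the preload here, and (per the
changelog) add shadow PIDs later so the TLB needn't be invalidated on
every switch, which is why no such check is introduced.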