From: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>

Fix regression introduced by

  commit e30d2a170506830d5eef5e9d7990c5aedf1b0a51
  KVM: MMU: Optimize guest page table walk

On x86_32, get_user() does not support 64-bit values, so KVM fails to
build when 64-bit paging (PTTYPE == 64) is compiled in.  Fix this by
using get_user() twice in that case.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>
Reported-by: Jan Kiszka <jan.kiszka@xxxxxx>
---
 arch/x86/kvm/paging_tmpl.h |   16 +++++++++++++++-
 1 files changed, 15 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 652d56c..7d36ac9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -115,6 +115,20 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 	return access;
 }
 
+static int FNAME(read_gpte)(pt_element_t *pte, pt_element_t __user *ptep_user)
+{
+#if defined(CONFIG_X86_32) && (PTTYPE == 64)
+	u32 *p = (u32 *)pte;
+	u32 __user *p_user = (u32 __user *)ptep_user;
+
+	if (unlikely(get_user(*p, p_user)))
+		return -EFAULT;
+	return get_user(*(p + 1), p_user + 1);
+#else
+	return get_user(*pte, ptep_user);
+#endif
+}
+
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -185,7 +199,7 @@ walk:
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-		if (unlikely(get_user(pte, ptep_user))) {
+		if (unlikely(FNAME(read_gpte)(&pte, ptep_user))) {
 			present = false;
 			break;
 		}
-- 
1.7.1
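
Illustration only, not part of the patch: a minimal stand-alone user-space
sketch of the split-read idea the patch relies on.  On x86 (little-endian)
the low 32 bits of a 64-bit pte sit at the lower address, so two 32-bit
loads recover the full value.  Every name below is invented for this
example; the kernel code uses get_user() on the two halves instead.

	/* illustration of reading a 64-bit value as two 32-bit halves */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t read_u64_as_two_u32(const uint64_t *src)
	{
		/* same cast the patch performs on the pte pointer */
		const uint32_t *p = (const uint32_t *)src;
		uint32_t lo = p[0];	/* bits 0..31  (little-endian) */
		uint32_t hi = p[1];	/* bits 32..63 */

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		uint64_t pte = 0x8000000012345067ULL;	/* made-up pte-like value */

		printf("split read: %#llx\n",
		       (unsigned long long)read_u64_as_two_u32(&pte));
		return 0;
	}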