Rename set_pte to kvm_set_pte to avoid a name clash with the
system-wide set_pte definitions, especially on arm64, where
asm/pgtable.h already provides one. Use pte_val on both sides of the
*pte assignment so the code keeps building with strict mm type
checking (STRICT_MM_TYPECHECKS) enabled.

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
Signed-off-by: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
---
 arch/arm/kvm/mmu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 720bbd5..c7226fe 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -41,9 +41,9 @@ static void kvm_tlb_flush_vmid(struct kvm *kvm)
 	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 }
 
-static void set_pte(pte_t *pte, pte_t new_pte)
+static void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
-	*pte = new_pte;
+	pte_val(*pte) = pte_val(new_pte);
 	/*
 	 * flush_pmd_entry just takes a void pointer and cleans the necessary
 	 * cache entries, so we can reuse the function for ptes.
@@ -150,13 +150,13 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
 		pte = pte_offset_kernel(pmd, addr);
 		if (pfn_base) {
 			BUG_ON(pfn_valid(*pfn_base));
-			set_pte(pte, pfn_pte(*pfn_base, prot));
+			kvm_set_pte(pte, pfn_pte(*pfn_base, prot));
 			(*pfn_base)++;
 		} else {
 			struct page *page;
 			BUG_ON(!virt_addr_valid(addr));
 			page = virt_to_page(addr);
-			set_pte(pte, mk_pte(page, prot));
+			kvm_set_pte(pte, mk_pte(page, prot));
 		}
 	}
 
@@ -403,7 +403,7 @@ static void stage2_clear_pte(struct kvm *kvm, phys_addr_t addr)
 		return;
 
 	pte = pte_offset_kernel(pmd, addr);
-	set_pte(pte, __pte(0));
+	kvm_set_pte(pte, __pte(0));
 
 	page = virt_to_page(pte);
 	put_page(page);
@@ -469,7 +469,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 
 	/* Create 2nd stage page table mapping - Level 3 */
 	old_pte = *pte;
-	set_pte(pte, *new_pte);
+	kvm_set_pte(pte, *new_pte);
 	if (pte_present(old_pte))
 		kvm_tlb_flush_vmid(kvm);
 	else
--
1.7.9.5
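
A note on the pte_val() change, since the failure mode is easy to
reproduce outside the kernel: under STRICT_MM_TYPECHECKS, pte_t becomes
a distinct struct instead of a plain integer, so mixing it with raw
pteval_t values stops compiling. What follows is a minimal userspace
sketch of that setup, not kernel code — the typedefs mirror the two
variants in arch/arm's page.h, while the main() harness and the sample
value are made up for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pteval_t;

#ifdef STRICT_MM_TYPECHECKS
/* pte_t is a distinct struct: no silent mixing with plain integers. */
typedef struct { pteval_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#else
/* pte_t is a plain integer; the wrappers are identity macros. */
typedef pteval_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)
#endif

static void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	/*
	 * pte_val() on both sides compiles whether or not
	 * STRICT_MM_TYPECHECKS is defined. The one-sided form
	 * "pte_val(*pte) = new_pte" assigns a struct pte_t to a
	 * pteval_t and is rejected by the strict build.
	 */
	pte_val(*pte) = pte_val(new_pte);
}

int main(void)
{
	pte_t pte = __pte(0);

	kvm_set_pte(&pte, __pte(0x12345678));
	printf("pte = 0x%08x\n", (unsigned int)pte_val(pte));
	return 0;
}

Compiling the sketch twice exercises both configurations and confirms
that the pte_val()-on-both-sides form, as used in the first hunk above,
is accepted by both:

	cc sketch.c -o sketch
	cc -DSTRICT_MM_TYPECHECKS sketch.c -o sketch_strict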