tree: https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git master head: a7d7d3ebc7d441ee48e1fd87d9a8abbf7d528127 commit: 4cc936fd184a2641801c906ea320326317895397 [7/8] KVM: arm64: Relax the restriction on using stage2 PUD huge mapping config: arm-axm55xx_defconfig (attached as .config) compiler: arm-linux-gnueabi-gcc (Debian 8.2.0-11) 8.2.0 reproduce: wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross git checkout 4cc936fd184a2641801c906ea320326317895397 # save the attached .config to linux build tree GCC_VERSION=8.2.0 make.cross ARCH=arm All errors (new ones prefixed by >>): arch/arm/kvm/../../../virt/kvm/arm/mmu.c: In function 'user_mem_abort': >> arch/arm/kvm/../../../virt/kvm/arm/mmu.c:1705:36: error: implicit declaration of function 'kvm_stage2_has_pmd'; did you mean 'kvm_stage2_has_pud'? [-Werror=implicit-function-declaration] (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && ^~~~~~~~~~~~~~~~~~ kvm_stage2_has_pud cc1: some warnings being treated as errors vim +1705 arch/arm/kvm/../../../virt/kvm/arm/mmu.c 1654 1655 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 1656 struct kvm_memory_slot *memslot, unsigned long hva, 1657 unsigned long fault_status) 1658 { 1659 int ret; 1660 bool write_fault, writable, force_pte = false; 1661 bool exec_fault, needs_exec; 1662 unsigned long mmu_seq; 1663 gfn_t gfn = fault_ipa >> PAGE_SHIFT; 1664 struct kvm *kvm = vcpu->kvm; 1665 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; 1666 struct vm_area_struct *vma; 1667 kvm_pfn_t pfn; 1668 pgprot_t mem_type = PAGE_S2; 1669 bool logging_active = memslot_is_logging(memslot); 1670 unsigned long vma_pagesize, flags = 0; 1671 1672 write_fault = kvm_is_write_fault(vcpu); 1673 exec_fault = kvm_vcpu_trap_is_iabt(vcpu); 1674 VM_BUG_ON(write_fault && exec_fault); 1675 1676 if (fault_status == FSC_PERM && !write_fault && !exec_fault) { 1677 
kvm_err("Unexpected L2 read permission error\n"); 1678 return -EFAULT; 1679 } 1680 1681 if (!fault_supports_stage2_pmd_mappings(memslot, hva)) 1682 force_pte = true; 1683 1684 if (logging_active) 1685 force_pte = true; 1686 1687 /* Let's check if we will get back a huge page backed by hugetlbfs */ 1688 down_read(&current->mm->mmap_sem); 1689 vma = find_vma_intersection(current->mm, hva, hva + 1); 1690 if (unlikely(!vma)) { 1691 kvm_err("Failed to find VMA for hva 0x%lx\n", hva); 1692 up_read(&current->mm->mmap_sem); 1693 return -EFAULT; 1694 } 1695 1696 vma_pagesize = vma_kernel_pagesize(vma); 1697 /* 1698 * The stage2 has a minimum of 2 level table (For arm64 see 1699 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can 1700 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD). 1701 * As for PUD huge maps, we must make sure that we have at least 1702 * 3 levels, i.e, PMD is not folded. 1703 */ 1704 if ((vma_pagesize == PMD_SIZE || > 1705 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && 1706 !force_pte) { 1707 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; 1708 } 1709 up_read(&current->mm->mmap_sem); 1710 1711 /* We need minimum second+third level pages */ 1712 ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm), 1713 KVM_NR_MEM_OBJS); 1714 if (ret) 1715 return ret; 1716 1717 mmu_seq = vcpu->kvm->mmu_notifier_seq; 1718 /* 1719 * Ensure the read of mmu_notifier_seq happens before we call 1720 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk 1721 * the page we just got a reference to gets unmapped before we have a 1722 * chance to grab the mmu_lock, which ensure that if the page gets 1723 * unmapped afterwards, the call to kvm_unmap_hva will take it away 1724 * from us again properly. This smp_rmb() interacts with the smp_wmb() 1725 * in kvm_mmu_notifier_invalidate_<page|range_end>. 
1726 */ 1727 smp_rmb(); 1728 1729 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); 1730 if (pfn == KVM_PFN_ERR_HWPOISON) { 1731 kvm_send_hwpoison_signal(hva, vma); 1732 return 0; 1733 } 1734 if (is_error_noslot_pfn(pfn)) 1735 return -EFAULT; 1736 1737 if (kvm_is_device_pfn(pfn)) { 1738 mem_type = PAGE_S2_DEVICE; 1739 flags |= KVM_S2PTE_FLAG_IS_IOMAP; 1740 } else if (logging_active) { 1741 /* 1742 * Faults on pages in a memslot with logging enabled 1743 * should not be mapped with huge pages (it introduces churn 1744 * and performance degradation), so force a pte mapping. 1745 */ 1746 flags |= KVM_S2_FLAG_LOGGING_ACTIVE; 1747 1748 /* 1749 * Only actually map the page as writable if this was a write 1750 * fault. 1751 */ 1752 if (!write_fault) 1753 writable = false; 1754 } 1755 1756 spin_lock(&kvm->mmu_lock); 1757 if (mmu_notifier_retry(kvm, mmu_seq)) 1758 goto out_unlock; 1759 1760 if (vma_pagesize == PAGE_SIZE && !force_pte) { 1761 /* 1762 * Only PMD_SIZE transparent hugepages(THP) are 1763 * currently supported. This code will need to be 1764 * updated to support other THP sizes. 1765 */ 1766 if (transparent_hugepage_adjust(&pfn, &fault_ipa)) 1767 vma_pagesize = PMD_SIZE; 1768 } 1769 1770 if (writable) 1771 kvm_set_pfn_dirty(pfn); 1772 1773 if (fault_status != FSC_PERM) 1774 clean_dcache_guest_page(pfn, vma_pagesize); 1775 1776 if (exec_fault) 1777 invalidate_icache_guest_page(pfn, vma_pagesize); 1778 1779 /* 1780 * If we took an execution fault we have made the 1781 * icache/dcache coherent above and should now let the s2 1782 * mapping be executable. 1783 * 1784 * Write faults (!exec_fault && FSC_PERM) are orthogonal to 1785 * execute permissions, and we preserve whatever we have. 
1786 */ 1787 needs_exec = exec_fault || 1788 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa)); 1789 1790 if (vma_pagesize == PUD_SIZE) { 1791 pud_t new_pud = kvm_pfn_pud(pfn, mem_type); 1792 1793 new_pud = kvm_pud_mkhuge(new_pud); 1794 if (writable) 1795 new_pud = kvm_s2pud_mkwrite(new_pud); 1796 1797 if (needs_exec) 1798 new_pud = kvm_s2pud_mkexec(new_pud); 1799 1800 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud); 1801 } else if (vma_pagesize == PMD_SIZE) { 1802 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type); 1803 1804 new_pmd = kvm_pmd_mkhuge(new_pmd); 1805 1806 if (writable) 1807 new_pmd = kvm_s2pmd_mkwrite(new_pmd); 1808 1809 if (needs_exec) 1810 new_pmd = kvm_s2pmd_mkexec(new_pmd); 1811 1812 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); 1813 } else { 1814 pte_t new_pte = kvm_pfn_pte(pfn, mem_type); 1815 1816 if (writable) { 1817 new_pte = kvm_s2pte_mkwrite(new_pte); 1818 mark_page_dirty(kvm, gfn); 1819 } 1820 1821 if (needs_exec) 1822 new_pte = kvm_s2pte_mkexec(new_pte); 1823 1824 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); 1825 } 1826 1827 out_unlock: 1828 spin_unlock(&kvm->mmu_lock); 1829 kvm_set_pfn_accessed(pfn); 1830 kvm_release_pfn_clean(pfn); 1831 return ret; 1832 } 1833 --- 0-DAY kernel test infrastructure Open Source Technology Center https://lists.01.org/pipermail/kbuild-all Intel Corporation
Attachment:
.config.gz
Description: application/gzip
_______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/mailman/listinfo/kvmarm