From: Arjun Roy <arjunroy@xxxxxxxxxx>

Add missing page_count() check to vm_insert_pages(), specifically
inside insert_page_in_batch_locked(). This was accidentally forgotten
in the original patchset.

See: https://marc.info/?l=linux-mm&m=158156166403807&w=2

The intention of this patch-set is to reduce atomic ops for tcp
zerocopy receives, which normally hits the same spinlock multiple
times consecutively.

Signed-off-by: Arjun Roy <arjunroy@xxxxxxxxxx>
---
 mm/memory.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mm/memory.c b/mm/memory.c
index f1d5f2264aef..3b4007a6ef7f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1463,8 +1463,11 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
 
 static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
-	const int err = validate_page_before_insert(page);
+	int err;
+	if (!page_count(page))
+		return -EINVAL;
+	err = validate_page_before_insert(page);
 	return err ? err : insert_page_into_pte_locked(
 			mm, pte_offset_map(pmd, addr), addr, page, prot);
 }
-- 
2.25.0.265.gbab2e86ba0-goog