The patch titled
     Subject: hugetlb: remove unused hstate in hugetlb_fault_mutex_hash()
has been removed from the -mm tree.  Its filename was
     hugetlb-remove-unused-hstate-in-hugetlb_fault_mutex_hash.patch

This patch was dropped because an alternative patch was merged

------------------------------------------------------
From: Wei Yang <richardw.yang@xxxxxxxxxxxxxxx>
Subject: hugetlb: remove unused hstate in hugetlb_fault_mutex_hash()

The first parameter hstate in function hugetlb_fault_mutex_hash() is not
used anymore.  This patch removes it.

Link: http://lkml.kernel.org/r/20191005003302.785-1-richardw.yang@xxxxxxxxxxxxxxx
Signed-off-by: Wei Yang <richardw.yang@xxxxxxxxxxxxxxx>
Suggested-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 fs/hugetlbfs/inode.c    |    4 ++--
 include/linux/hugetlb.h |    4 ++--
 mm/hugetlb.c            |   12 ++++++------
 mm/userfaultfd.c        |    5 +----
 4 files changed, 11 insertions(+), 14 deletions(-)

--- a/fs/hugetlbfs/inode.c~hugetlb-remove-unused-hstate-in-hugetlb_fault_mutex_hash
+++ a/fs/hugetlbfs/inode.c
@@ -440,7 +440,7 @@ static void remove_inode_hugepages(struc
                         u32 hash;

                         index = page->index;
-                        hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
+                        hash = hugetlb_fault_mutex_hash(mapping, index, 0);
                         mutex_lock(&hugetlb_fault_mutex_table[hash]);

                         /*
@@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct f
                 addr = index * hpage_size;

                 /* mutex taken here, fault path and hole punch */
-                hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
+                hash = hugetlb_fault_mutex_hash(mapping, index, addr);
                 mutex_lock(&hugetlb_fault_mutex_table[hash]);

                 /* See if already present in mapping to avoid alloc/free */
--- a/include/linux/hugetlb.h~hugetlb-remove-unused-hstate-in-hugetlb_fault_mutex_hash
+++ a/include/linux/hugetlb.h
@@ -105,8 +105,8 @@ void move_hugetlb_state(struct page *old
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-                                pgoff_t idx, unsigned long address);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx,
+                                unsigned long address);

 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

--- a/mm/hugetlb.c~hugetlb-remove-unused-hstate-in-hugetlb_fault_mutex_hash
+++ a/mm/hugetlb.c
@@ -3870,7 +3870,7 @@ retry:
                          * handling userfault. Reacquire after handling
                          * fault to make calling code simpler.
                          */
-                        hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+                        hash = hugetlb_fault_mutex_hash(mapping, idx, haddr);
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3997,8 +3997,8 @@ backout_unlocked:
 }

 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-                            pgoff_t idx, unsigned long address)
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx,
+                            unsigned long address)
 {
         unsigned long key[2];
         u32 hash;
@@ -4015,8 +4015,8 @@ u32 hugetlb_fault_mutex_hash(struct hsta
  * For uniprocesor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-                            pgoff_t idx, unsigned long address)
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx,
+                            unsigned long address)
 {
         return 0;
 }
@@ -4060,7 +4060,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
          * get spurious allocation failures if two CPUs race to instantiate
          * the same page in the page cache.
          */
-        hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+        hash = hugetlb_fault_mutex_hash(mapping, idx, haddr);
         mutex_lock(&hugetlb_fault_mutex_table[hash]);

         entry = huge_ptep_get(ptep);
--- a/mm/userfaultfd.c~hugetlb-remove-unused-hstate-in-hugetlb_fault_mutex_hash
+++ a/mm/userfaultfd.c
@@ -184,7 +184,6 @@ static __always_inline ssize_t __mcopy_a
         unsigned long src_addr, dst_addr;
         long copied;
         struct page *page;
-        struct hstate *h;
         unsigned long vma_hpagesize;
         pgoff_t idx;
         u32 hash;
@@ -256,8 +255,6 @@ retry:
                 goto out_unlock;
         }

-        h = hstate_vma(dst_vma);
-
         while (src_addr < src_start + len) {
                 pte_t dst_pteval;

@@ -269,7 +266,7 @@ retry:
                  */
                 idx = linear_page_index(dst_vma, dst_addr);
                 mapping = dst_vma->vm_file->f_mapping;
-                hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
+                hash = hugetlb_fault_mutex_hash(mapping, idx, dst_addr);
                 mutex_lock(&hugetlb_fault_mutex_table[hash]);

                 err = -ENOMEM;
_
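
For readers following the locking change rather than the diff mechanics, a
minimal user-space sketch of the idea behind hugetlb_fault_mutex_hash() is
shown below: hash (mapping, idx) into a fixed table of mutexes so that racing
faults on the same page serialize on the same lock, which is why the hstate
argument could be dropped without affecting behavior.  The hash function, the
table size NUM_FAULT_MUTEXES and the pthread locking are illustrative
stand-ins, not the kernel's implementation.

/* sketch.c - illustrative only; build with: gcc sketch.c -pthread */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 64    /* assumed power of two, like the kernel table */

static pthread_mutex_t fault_mutex_table[NUM_FAULT_MUTEXES];

/* The hstate argument is gone: only (mapping, idx) feed the hash. */
static uint32_t fault_mutex_hash(const void *mapping, unsigned long idx)
{
        /* simple mix of the two keys; stand-in for the kernel's hashing */
        uint64_t key = (uint64_t)(uintptr_t)mapping ^
                       ((uint64_t)idx * 0x9e3779b97f4a7c15ULL);

        return (uint32_t)(key >> 32) & (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
        int dummy_mapping;      /* stand-in for a struct address_space */
        uint32_t hash;
        int i;

        for (i = 0; i < NUM_FAULT_MUTEXES; i++)
                pthread_mutex_init(&fault_mutex_table[i], NULL);

        /* Faults on the same (mapping, idx) pair always pick the same mutex. */
        hash = fault_mutex_hash(&dummy_mapping, 42);
        pthread_mutex_lock(&fault_mutex_table[hash]);
        printf("fault on (mapping=%p, idx=42) serializes on mutex %u\n",
               (void *)&dummy_mapping, (unsigned int)hash);
        pthread_mutex_unlock(&fault_mutex_table[hash]);

        return 0;
}

As the CONFIG_SMP=n hunk above shows, uniprocessor builds skip the hashing
entirely and fall back to a single mutex by always returning 0.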

Patches currently in -mm which might be from richardw.yang@xxxxxxxxxxxxxxx are

mm-fix-typo-in-the-comment-when-calling-function-__setpageuptodate.patch
mm-mmapc-remove-a-never-trigger-warning-in-__vma_adjust.patch
userfaultfd-use-vma_pagesize-for-all-huge-page-size-calculation.patch
userfaultfd-remove-unnecessary-warn_on-in-__mcopy_atomic_hugetlb.patch
userfaultfd-wrap-the-common-dst_vma-check-into-an-inlined-function.patch
fs-userfaultfdc-simplify-the-calculation-of-new_flags.patch