[rgushchin:kmem_reparent.4 170/380] mm/mprotect.c:138:19: error: 'struct vm_area_struct' has no member named 'mm'

tree:   https://github.com/rgushchin/linux.git kmem_reparent.4
head:   595d92aaebb6a603b2820ce7188b6db971693d85
commit: c7b45943bdf12b5ccfcd016538c62aa7edd604d5 [170/380] mm/mprotect.c: fix compilation warning because of unused 'mm' variable
config: i386-randconfig-x070-201919 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        git checkout c7b45943bdf12b5ccfcd016538c62aa7edd604d5
        # save the attached .config to linux build tree
        make ARCH=i386 

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp@xxxxxxxxx>

Note: the rgushchin/kmem_reparent.4 HEAD 595d92aaebb6a603b2820ce7188b6db971693d85 builds fine.
      The broken commit only hurts bisectability.

All errors (new ones prefixed by >>):

   In file included from include/linux/mm.h:99:0,
                    from mm/mprotect.c:12:
   mm/mprotect.c: In function 'change_pte_range':
>> mm/mprotect.c:138:19: error: 'struct vm_area_struct' has no member named 'mm'
        set_pte_at(vma->mm, addr, pte, newpte);
                      ^
   arch/x86/include/asm/pgtable.h:64:59: note: in definition of macro 'set_pte_at'
    #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
                                                              ^~
>> mm/mprotect.c:152:16: error: 'mm' undeclared (first use in this function); did you mean 'hmm'?
        set_pte_at(mm, addr, pte, newpte);
                   ^
   arch/x86/include/asm/pgtable.h:64:59: note: in definition of macro 'set_pte_at'
    #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
                                                              ^~
   mm/mprotect.c:152:16: note: each undeclared identifier is reported only once for each function it appears in
        set_pte_at(mm, addr, pte, newpte);
                   ^
   arch/x86/include/asm/pgtable.h:64:59: note: in definition of macro 'set_pte_at'
    #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
                                                              ^~

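Both errors come from the same commit: 'struct vm_area_struct' names its mm_struct pointer 'vm_mm', not 'mm', and the local 'mm' variable that the second call site relied on was evidently removed by the commit. A plausible fix, sketched against the quoted source below (assuming the commit's intent was simply to drop the unused local and reach the mm_struct through the VMA, as change_pte_range already does elsewhere via vma->vm_mm; hunk line numbers are taken from the excerpt):

--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -135,7 +135,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				newpte = swp_entry_to_pte(entry);
 				if (pte_swp_soft_dirty(oldpte))
 					newpte = pte_swp_mksoft_dirty(newpte);
-				set_pte_at(vma->mm, addr, pte, newpte);
+				set_pte_at(vma->vm_mm, addr, pte, newpte);
 
 				pages++;
 			}
@@ -149,7 +149,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				 */
 				make_device_private_entry_read(&entry);
 				newpte = swp_entry_to_pte(entry);
-				set_pte_at(mm, addr, pte, newpte);
+				set_pte_at(vma->vm_mm, addr, pte, newpte);

With both call sites going through vma->vm_mm, no local 'mm' variable is needed, so the unused-variable warning the commit targeted stays fixed; presumably the branch HEAD resolves it along these lines, since it builds fine.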
vim +138 mm/mprotect.c

  > 12	#include <linux/mm.h>
    13	#include <linux/hugetlb.h>
    14	#include <linux/shm.h>
    15	#include <linux/mman.h>
    16	#include <linux/fs.h>
    17	#include <linux/highmem.h>
    18	#include <linux/security.h>
    19	#include <linux/mempolicy.h>
    20	#include <linux/personality.h>
    21	#include <linux/syscalls.h>
    22	#include <linux/swap.h>
    23	#include <linux/swapops.h>
    24	#include <linux/mmu_notifier.h>
    25	#include <linux/migrate.h>
    26	#include <linux/perf_event.h>
    27	#include <linux/pkeys.h>
    28	#include <linux/ksm.h>
    29	#include <linux/uaccess.h>
    30	#include <linux/mm_inline.h>
    31	#include <asm/pgtable.h>
    32	#include <asm/cacheflush.h>
    33	#include <asm/mmu_context.h>
    34	#include <asm/tlbflush.h>
    35	
    36	#include "internal.h"
    37	
    38	static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
    39			unsigned long addr, unsigned long end, pgprot_t newprot,
    40			int dirty_accountable, int prot_numa)
    41	{
    42		pte_t *pte, oldpte;
    43		spinlock_t *ptl;
    44		unsigned long pages = 0;
    45		int target_node = NUMA_NO_NODE;
    46	
    47		/*
    48		 * Can be called with only the mmap_sem for reading by
    49		 * prot_numa so we must check the pmd isn't constantly
    50		 * changing from under us from pmd_none to pmd_trans_huge
    51		 * and/or the other way around.
    52		 */
    53		if (pmd_trans_unstable(pmd))
    54			return 0;
    55	
    56		/*
    57		 * The pmd points to a regular pte so the pmd can't change
    58		 * from under us even if the mmap_sem is only hold for
    59		 * reading.
    60		 */
    61		pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    62	
    63		/* Get target node for single threaded private VMAs */
    64		if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
    65		    atomic_read(&vma->vm_mm->mm_users) == 1)
    66			target_node = numa_node_id();
    67	
    68		flush_tlb_batched_pending(vma->vm_mm);
    69		arch_enter_lazy_mmu_mode();
    70		do {
    71			oldpte = *pte;
    72			if (pte_present(oldpte)) {
    73				pte_t ptent;
    74				bool preserve_write = prot_numa && pte_write(oldpte);
    75	
    76				/*
    77				 * Avoid trapping faults against the zero or KSM
    78				 * pages. See similar comment in change_huge_pmd.
    79				 */
    80				if (prot_numa) {
    81					struct page *page;
    82	
    83					page = vm_normal_page(vma, addr, oldpte);
    84					if (!page || PageKsm(page))
    85						continue;
    86	
    87					/* Also skip shared copy-on-write pages */
    88					if (is_cow_mapping(vma->vm_flags) &&
    89					    page_mapcount(page) != 1)
    90						continue;
    91	
    92					/*
    93					 * While migration can move some dirty pages,
    94					 * it cannot move them all from MIGRATE_ASYNC
    95					 * context.
    96					 */
    97					if (page_is_file_cache(page) && PageDirty(page))
    98						continue;
    99	
   100					/* Avoid TLB flush if possible */
   101					if (pte_protnone(oldpte))
   102						continue;
   103	
   104					/*
   105					 * Don't mess with PTEs if page is already on the node
   106					 * a single-threaded process is running on.
   107					 */
   108					if (target_node == page_to_nid(page))
   109						continue;
   110				}
   111	
   112				oldpte = ptep_modify_prot_start(vma, addr, pte);
   113				ptent = pte_modify(oldpte, newprot);
   114				if (preserve_write)
   115					ptent = pte_mk_savedwrite(ptent);
   116	
   117				/* Avoid taking write faults for known dirty pages */
   118				if (dirty_accountable && pte_dirty(ptent) &&
   119						(pte_soft_dirty(ptent) ||
   120						 !(vma->vm_flags & VM_SOFTDIRTY))) {
   121					ptent = pte_mkwrite(ptent);
   122				}
   123				ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
   124				pages++;
   125			} else if (IS_ENABLED(CONFIG_MIGRATION)) {
   126				swp_entry_t entry = pte_to_swp_entry(oldpte);
   127	
   128				if (is_write_migration_entry(entry)) {
   129					pte_t newpte;
   130					/*
   131					 * A protection check is difficult so
   132					 * just be safe and disable write
   133					 */
   134					make_migration_entry_read(&entry);
   135					newpte = swp_entry_to_pte(entry);
   136					if (pte_swp_soft_dirty(oldpte))
   137						newpte = pte_swp_mksoft_dirty(newpte);
 > 138					set_pte_at(vma->mm, addr, pte, newpte);
   139	
   140					pages++;
   141				}
   142	
   143				if (is_write_device_private_entry(entry)) {
   144					pte_t newpte;
   145	
   146					/*
   147					 * We do not preserve soft-dirtiness. See
   148					 * copy_one_pte() for explanation.
   149					 */
   150					make_device_private_entry_read(&entry);
   151					newpte = swp_entry_to_pte(entry);
 > 152					set_pte_at(mm, addr, pte, newpte);
   153	
   154					pages++;
   155				}
   156			}
   157		} while (pte++, addr += PAGE_SIZE, addr != end);
   158		arch_leave_lazy_mmu_mode();
   159		pte_unmap_unlock(pte - 1, ptl);
   160	
   161		return pages;
   162	}
   163	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip

