[vfs:misc.ppc 1/6] arch/powerpc/mm/subpage-prot.c:189:30: error: expected ')' before 'unsigned'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs.git misc.ppc
head:   ceccb992ad51cea346449b350eca7de0d22ab207
commit: 6ac4156f6ed1d8a6e0c232aeeeecb0088251d5e0 [1/6] ppc: switch trivial cases to SYSCALL_DEFINE
config: powerpc-powernv_defconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout 6ac4156f6ed1d8a6e0c232aeeeecb0088251d5e0
        # save the attached .config to linux build tree
        make.cross ARCH=powerpc 

All errors (new ones prefixed by >>):

>> arch/powerpc/mm/subpage-prot.c:189:30: error: expected ')' before 'unsigned'
    SYSCALL_DEFIN3(subpage_prot, unsigned long, addr,
                                 ^~~~~~~~
>> arch/powerpc/mm/subpage-prot.c:143:13: error: 'subpage_mark_vma_nohuge' defined but not used [-Werror=unused-function]
    static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
                ^~~~~~~~~~~~~~~~~~~~~~~
>> arch/powerpc/mm/subpage-prot.c:94:13: error: 'subpage_prot_clear' defined but not used [-Werror=unused-function]
    static void subpage_prot_clear(unsigned long addr, unsigned long len)
                ^~~~~~~~~~~~~~~~~~
   cc1: all warnings being treated as errors
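
The first error is a typo introduced by the commit: the macro at line 189 is
spelled SYSCALL_DEFIN3 rather than SYSCALL_DEFINE3, so it is never expanded
and the compiler trips over the bare parameter list.  Because parsing stops
there, the rest of the file (which calls subpage_prot_clear() and
subpage_mark_vma_nohuge()) is never compiled, and -Werror then promotes the
resulting defined-but-not-used warnings into the two follow-on errors above.
The declaration presumably wants to read roughly as in the sketch below; the
trailing arguments are inferred from the long-standing sys_subpage_prot()
prototype and are an assumption, not taken from the commit itself:

	/*
	 * Hypothetical corrected form of the line at +189; the argument
	 * list is inferred from the historical sys_subpage_prot()
	 * prototype, not from the broken commit.
	 */
	SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
			unsigned long, len, u32 __user *, map)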

vim +189 arch/powerpc/mm/subpage-prot.c

    89	
    90	/*
    91	 * Clear the subpage protection map for an address range, allowing
    92	 * all accesses that are allowed by the pte permissions.
    93	 */
  > 94	static void subpage_prot_clear(unsigned long addr, unsigned long len)
    95	{
    96		struct mm_struct *mm = current->mm;
    97		struct subpage_prot_table *spt = &mm->context.spt;
    98		u32 **spm, *spp;
    99		unsigned long i;
   100		size_t nw;
   101		unsigned long next, limit;
   102	
   103		down_write(&mm->mmap_sem);
   104		limit = addr + len;
   105		if (limit > spt->maxaddr)
   106			limit = spt->maxaddr;
   107		for (; addr < limit; addr = next) {
   108			next = pmd_addr_end(addr, limit);
   109			if (addr < 0x100000000UL) {
   110				spm = spt->low_prot;
   111			} else {
   112				spm = spt->protptrs[addr >> SBP_L3_SHIFT];
   113				if (!spm)
   114					continue;
   115			}
   116			spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
   117			if (!spp)
   118				continue;
   119			spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);
   120	
   121			i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
   122			nw = PTRS_PER_PTE - i;
   123			if (addr + (nw << PAGE_SHIFT) > next)
   124				nw = (next - addr) >> PAGE_SHIFT;
   125	
   126			memset(spp, 0, nw * sizeof(u32));
   127	
   128			/* now flush any existing HPTEs for the range */
   129			hpte_flush_range(mm, addr, nw);
   130		}
   131		up_write(&mm->mmap_sem);
   132	}
   133	
   134	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
   135	static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
   136					  unsigned long end, struct mm_walk *walk)
   137	{
   138		struct vm_area_struct *vma = walk->vma;
   139		split_huge_pmd(vma, pmd, addr);
   140		return 0;
   141	}
   142	
 > 143	static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
   144					    unsigned long len)
   145	{
   146		struct vm_area_struct *vma;
   147		struct mm_walk subpage_proto_walk = {
   148			.mm = mm,
   149			.pmd_entry = subpage_walk_pmd_entry,
   150		};
   151	
   152		/*
   153		 * We don't try too hard, we just mark all the vma in that range
   154		 * VM_NOHUGEPAGE and split them.
   155		 */
   156		vma = find_vma(mm, addr);
   157		/*
   158		 * If the range is in unmapped range, just return
   159		 */
   160		if (vma && ((addr + len) <= vma->vm_start))
   161			return;
   162	
   163		while (vma) {
   164			if (vma->vm_start >= (addr + len))
   165				break;
   166			vma->vm_flags |= VM_NOHUGEPAGE;
   167			walk_page_vma(vma, &subpage_proto_walk);
   168			vma = vma->vm_next;
   169		}
   170	}
   171	#else
   172	static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
   173					    unsigned long len)
   174	{
   175		return;
   176	}
   177	#endif
   178	
   179	/*
   180	 * Copy in a subpage protection map for an address range.
   181	 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
   182	 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
   183	 * 2 or 3 to prevent all accesses.
   184	 * Note that the normal page protections also apply; the subpage
   185	 * protection mechanism is an additional constraint, so putting 0
   186	 * in a 2-bit field won't allow writes to a page that is otherwise
   187	 * write-protected.
   188	 */
 > 189	SYSCALL_DEFIN3(subpage_prot, unsigned long, addr,
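
As an aside, the comment just above line 189 documents the map layout: two
bits per 4 KB subpage, so one u32 per 64 KB page, where 0 allows any access,
1 prevents writes, and 2 or 3 prevent all accesses.  A minimal userspace
sketch of packing one such word follows; placing subpage 0 in the low-order
bits is an assumption made for illustration, since the quoted comment does
not spell out the field ordering:

	#include <stdint.h>

	/*
	 * Pack the 16 per-subpage 2-bit protection values for one 64 KB
	 * page into a single map word (0 = allow all, 1 = prevent writes,
	 * 2 or 3 = prevent all accesses).  The bit ordering within the
	 * word is assumed, not taken from the kernel source quoted above.
	 */
	static uint32_t build_subpage_map(const unsigned int prot[16])
	{
		uint32_t word = 0;
		int i;

		for (i = 0; i < 16; i++)
			word |= (uint32_t)(prot[i] & 3u) << (2 * i);
		return word;
	}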

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip

