Re: [vbabka-slab:for-6.1/common_kmalloc 12/17] mm/slab_common.c:1023:7: warning: no previous prototype for '__kmalloc_large_node'

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Wed, Aug 24, 2022 at 10:46:55AM +0800, kernel test robot wrote:
> tree:   git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git for-6.1/common_kmalloc
> head:   b261334803b44092acd06be3c9f32c46af818359
> commit: 79c7527b9805edf14c952deca45de60a8a06a414 [12/17] mm/sl[au]b: generalize kmalloc subsystem
> config: x86_64-randconfig-a015 (https://download.01.org/0day-ci/archive/20220824/202208241004.jZykmQOH-lkp@xxxxxxxxx/config)
> compiler: gcc-11 (Debian 11.3.0-5) 11.3.0
> reproduce (this is a W=1 build):
>         # https://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git/commit/?id=79c7527b9805edf14c952deca45de60a8a06a414
>         git remote add vbabka-slab git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
>         git fetch --no-tags vbabka-slab for-6.1/common_kmalloc
>         git checkout 79c7527b9805edf14c952deca45de60a8a06a414
>         # save the config file
>         mkdir build_dir && cp config build_dir/.config
>         make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash
> 
> If you fix the issue, kindly add following tag where applicable
> Reported-by: kernel test robot <lkp@xxxxxxxxx>
> 
> All warnings (new ones prefixed by >>):
> 
> >> mm/slab_common.c:1023:7: warning: no previous prototype for '__kmalloc_large_node' [-Wmissing-prototypes]
>     1023 | void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
>          |       ^~~~~~~~~~~~~~~~~~~~

Oh, I forgot to add 'static' here.

Please pull:
	https://github.com/hygoni/linux.git slab-common-v4r1

Fixed the warning above, and also fixed an incorrect kernel-doc comment
(the comment said @objp, but the parameter's actual name is 'object').

git range-diff	for-6.1/common_kmalloc~17...for-6.1/common_kmalloc \
		slab-common-v4r1~17...slab-common-v4r1:

		 1:  0276f0da97e3 =  1:  0276f0da97e3 mm/slab: move NUMA-related code to __do_cache_alloc()
 2:  d5ea00e8d8c9 =  2:  d5ea00e8d8c9 mm/slab: cleanup slab_alloc() and slab_alloc_node()
 3:  48c55c42e6b8 =  3:  48c55c42e6b8 mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions
 4:  cd8523b488ec =  4:  cd8523b488ec mm/slab_common: cleanup kmalloc_track_caller()
 5:  0b92d497e03a =  5:  0b92d497e03a mm/sl[au]b: factor out __do_kmalloc_node()
 6:  d43649c0f472 =  6:  d43649c0f472 mm/slab_common: fold kmalloc_order_trace() into kmalloc_large()
 7:  cd6d756d6118 =  7:  cd6d756d6118 mm/slub: move kmalloc_large_node() to slab_common.c
 8:  fe8f3819416e !  8:  ec277200c5dd mm/slab_common: kmalloc_node: pass large requests to page allocator
    @@ mm/slab_common.c: void *kmalloc_large(size_t size, gfp_t flags)
      EXPORT_SYMBOL(kmalloc_large);

     -void *kmalloc_large_node(size_t size, gfp_t flags, int node)
    -+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    ++static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      {
      	struct page *page;
      	void *ptr = NULL;
 9:  cc40615623ed !  9:  3d1d49576f4a mm/slab_common: cleanup kmalloc_large()
    @@ mm/slab_common.c: gfp_t kmalloc_fix_flags(gfp_t flags)
     -}
     -EXPORT_SYMBOL(kmalloc_large);

    - void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    + static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      {
    -@@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    +@@ mm/slab_common.c: static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      	void *ptr = NULL;
      	unsigned int order = get_order(size);

    @@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int
      	flags |= __GFP_COMP;
      	page = alloc_pages_node(node, flags, order);
      	if (page) {
    -@@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    +@@ mm/slab_common.c: static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      	return ptr;
      }

10:  e14d748cf9ad = 10:  d6d55b2e745a mm/slab: kmalloc: pass requests larger than order-1 page to page allocator
11:  84000279b448 = 11:  28c1aabc9f73 mm/sl[au]b: introduce common alloc/free functions without tracepoint
12:  79c7527b9805 ! 12:  7fefa4235ba9 mm/sl[au]b: generalize kmalloc subsystem
    @@ mm/slab_common.c: void free_large_kmalloc(struct folio *folio, void *object)
     +
     +/**
     + * kfree - free previously allocated memory
    -+ * @objp: pointer returned by kmalloc.
    ++ * @object: pointer returned by kmalloc.
     + *
    -+ * If @objp is NULL, no operation is performed.
    ++ * If @object is NULL, no operation is performed.
     + *
     + * Don't free memory not originally allocated by kmalloc()
     + * or you will run into trouble.
    @@ mm/slab_common.c: void free_large_kmalloc(struct folio *folio, void *object)
     +
     +/**
     + * __ksize -- Uninstrumented ksize.
    -+ * @objp: pointer to the object
    ++ * @object: pointer to the object
     + *
     + * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
     + * safety checks as ksize() with KASAN instrumentation enabled.
     + *
    -+ * Return: size of the actual memory used by @objp in bytes
    ++ * Return: size of the actual memory used by @object in bytes
     + */
     +size_t __ksize(const void *object)
     +{
    @@ mm/slab_common.c: gfp_t kmalloc_fix_flags(gfp_t flags)
       * know the allocation order to free the pages properly in kfree.
       */

    --void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    -+void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
    +-static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    ++static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
      {
      	struct page *page;
      	void *ptr = NULL;
    -@@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    +@@ mm/slab_common.c: static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)

      void *kmalloc_large(size_t size, gfp_t flags)
      {
13:  31be83f97c43 = 13:  446064fdf403 mm/sl[au]b: cleanup kmem_cache_alloc[_node]_trace()
14:  583b9ef311da = 14:  c923544d6d61 mm/slab_common: unify NUMA and UMA version of tracepoints
15:  d0b3552d07e0 = 15:  72633319472e mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using
16:  0db36c104255 ! 16:  c9b5ded32cc6 mm/slab_common: move declaration of __ksize() to mm/slab.h
    @@ mm/slab_common.c: void kfree(const void *object)

     -/**
     - * __ksize -- Uninstrumented ksize.
    -- * @objp: pointer to the object
    +- * @object: pointer to the object
     - *
     - * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
     - * safety checks as ksize() with KASAN instrumentation enabled.
     - *
    -- * Return: size of the actual memory used by @objp in bytes
    +- * Return: size of the actual memory used by @object in bytes
     - */
     +/* Uninstrumented ksize. Only called by KASAN. */
      size_t __ksize(const void *object)
17:  b261334803b4 = 17:  0248c8a1af52 mm/sl[au]b: check if large object is valid in __ksize()


> vim +/__kmalloc_large_node +1023 mm/slab_common.c
> 
>   1016	
>   1017	/*
>   1018	 * To avoid unnecessary overhead, we pass through large allocation requests
>   1019	 * directly to the page allocator. We use __GFP_COMP, because we will need to
>   1020	 * know the allocation order to free the pages properly in kfree.
>   1021	 */
>   1022	
> > 1023	void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
>   1024	{
>   1025		struct page *page;
>   1026		void *ptr = NULL;
>   1027		unsigned int order = get_order(size);
>   1028	
>   1029		if (unlikely(flags & GFP_SLAB_BUG_MASK))
>   1030			flags = kmalloc_fix_flags(flags);
>   1031	
>   1032		flags |= __GFP_COMP;
>   1033		page = alloc_pages_node(node, flags, order);
>   1034		if (page) {
>   1035			ptr = page_address(page);
>   1036			mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
>   1037					      PAGE_SIZE << order);
>   1038		}
>   1039	
>   1040		ptr = kasan_kmalloc_large(ptr, size, flags);
>   1041		/* As ptr might get tagged, call kmemleak hook after KASAN. */
>   1042		kmemleak_alloc(ptr, size, 1, flags);
>   1043	
>   1044		return ptr;
>   1045	}
>   1046	
> 
> -- 
> 0-DAY CI Kernel Test Service
> https://01.org/lkp

-- 
Thanks,
Hyeonggon




[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux