From: Christoph Hellwig <hch@xxxxxx>
Subject: mm: pass addr as unsigned long to vb_free

Every use of addr in vb_free casts to unsigned long first, and the
caller has an unsigned long version of the address available anyway.
Just pass that and avoid all the casts.

Link: http://lkml.kernel.org/r/20200414131348.444715-13-hch@xxxxxx
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxx>
Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
Cc: David Airlie <airlied@xxxxxxxx>
Cc: Gao Xiang <xiang@xxxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Haiyang Zhang <haiyangz@xxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: "K. Y. Srinivasan" <kys@xxxxxxxxxxxxx>
Cc: Laura Abbott <labbott@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Michael Kelley <mikelley@xxxxxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Nitin Gupta <ngupta@xxxxxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Sakari Ailus <sakari.ailus@xxxxxxxxxxxxxxx>
Cc: Stephen Hemminger <sthemmin@xxxxxxxxxxxxx>
Cc: Sumit Semwal <sumit.semwal@xxxxxxxxxx>
Cc: Wei Liu <wei.liu@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/vmalloc.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

--- a/mm/vmalloc.c~mm-pass-addr-as-unsigned-long-to-vb_free
+++ a/mm/vmalloc.c
@@ -1665,7 +1665,7 @@ static void *vb_alloc(unsigned long size
 	return vaddr;
 }
 
-static void vb_free(const void *addr, unsigned long size)
+static void vb_free(unsigned long addr, unsigned long size)
 {
 	unsigned long offset;
 	unsigned long vb_idx;
@@ -1675,24 +1675,22 @@ static void vb_free(const void *addr, un
 	BUG_ON(offset_in_page(size));
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
 
-	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+	flush_cache_vunmap(addr, addr + size);
 
 	order = get_order(size);
 
-	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
-	offset >>= PAGE_SHIFT;
+	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
 
-	vb_idx = addr_to_vb_idx((unsigned long)addr);
+	vb_idx = addr_to_vb_idx(addr);
 	rcu_read_lock();
 	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
-	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+	vunmap_page_range(addr, addr + size);
 
 	if (debug_pagealloc_enabled_static())
-		flush_tlb_kernel_range((unsigned long)addr,
-					(unsigned long)addr + size);
+		flush_tlb_kernel_range(addr, addr + size);
 
 	spin_lock(&vb->lock);
 
@@ -1792,7 +1790,7 @@ void vm_unmap_ram(const void *mem, unsig
 
 	if (likely(count <= VMAP_MAX_ALLOC)) {
 		debug_check_no_locks_freed(mem, size);
-		vb_free(mem, size);
+		vb_free(addr, size);
 		return;
 	}
 
_
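
As a side note, the pattern generalizes beyond vb_free.  A minimal
userspace sketch of the same transformation, assuming (as the kernel
does) that unsigned long can hold a pointer value; the names
consume_range_old/consume_range_new and the demo caller are
hypothetical stand-ins for vb_free() and vm_unmap_ram(), not kernel
code:

#include <stdio.h>

/* Before: the address arrives as a pointer, yet every use casts it. */
static void consume_range_old(const void *addr, unsigned long size)
{
	printf("range [%#lx, %#lx)\n",
	       (unsigned long)addr, (unsigned long)addr + size);
}

/* After: the address arrives as unsigned long and the casts are gone. */
static void consume_range_new(unsigned long addr, unsigned long size)
{
	printf("range [%#lx, %#lx)\n", addr, addr + size);
}

int main(void)
{
	char buf[64];
	/* The caller derives the integer form once, much as
	 * vm_unmap_ram() does with its local addr variable. */
	unsigned long addr = (unsigned long)buf;

	consume_range_old(buf, sizeof(buf));	/* casts hidden in the callee */
	consume_range_new(addr, sizeof(buf));	/* caller passes what it has */
	return 0;
}

Changing the parameter type deletes the casts outright instead of
relocating them into the caller, which already holds the unsigned
long form.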