These two functions take an array of pointers to struct page. Make them
take an array of pointers to zsdesc instead. For now, add type casts at
the call sites; the casts will be removed in the next patch.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index dc6a7130cdfd..821d72ab888c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1361,7 +1361,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 }
 
 static void *__zs_map_object(struct mapping_area *area,
-			struct page *pages[2], int off, int size)
+			struct zsdesc *zsdescs[2], int off, int size)
 {
 	int sizes[2];
 	void *addr;
@@ -1378,10 +1378,10 @@ static void *__zs_map_object(struct mapping_area *area,
 	sizes[1] = size - sizes[0];
 
 	/* copy object to per-cpu buffer */
-	addr = kmap_atomic(pages[0]);
+	addr = zsdesc_kmap_atomic(zsdescs[0]);
 	memcpy(buf, addr + off, sizes[0]);
 	kunmap_atomic(addr);
-	addr = kmap_atomic(pages[1]);
+	addr = zsdesc_kmap_atomic(zsdescs[1]);
 	memcpy(buf + sizes[0], addr, sizes[1]);
 	kunmap_atomic(addr);
 out:
@@ -1389,7 +1389,7 @@ static void *__zs_map_object(struct mapping_area *area,
 }
 
 static void __zs_unmap_object(struct mapping_area *area,
-			struct page *pages[2], int off, int size)
+			struct zsdesc *zsdescs[2], int off, int size)
 {
 	int sizes[2];
 	void *addr;
@@ -1408,10 +1408,10 @@ static void __zs_unmap_object(struct mapping_area *area,
 	sizes[1] = size - sizes[0];
 
 	/* copy per-cpu buffer to object */
-	addr = kmap_atomic(pages[0]);
+	addr = zsdesc_kmap_atomic(zsdescs[0]);
 	memcpy(addr + off, buf, sizes[0]);
 	kunmap_atomic(addr);
-	addr = kmap_atomic(pages[1]);
+	addr = zsdesc_kmap_atomic(zsdescs[1]);
 	memcpy(addr, buf + sizes[0], sizes[1]);
 	kunmap_atomic(addr);
 
@@ -1572,7 +1572,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 		pages[1] = get_next_page(page);
 		BUG_ON(!pages[1]);
 
-		ret = __zs_map_object(area, pages, off, class->size);
+		ret = __zs_map_object(area, (struct zsdesc **)pages, off, class->size);
 out:
 	if (likely(!ZsHugePage(zspage)))
 		ret += ZS_HANDLE_SIZE;
@@ -1607,7 +1607,7 @@ void *zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 		pages[1] = get_next_page(page);
 		BUG_ON(!pages[1]);
 
-		__zs_unmap_object(area, pages, off, class->size);
+		__zs_unmap_object(area, (struct zsdesc **)pages, off, class->size);
 	}
 	local_unlock(&zs_map_area.lock);

-- 
2.25.1
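
For reference, the converted calls rely on a zsdesc_kmap_atomic() helper
introduced earlier in the series. A minimal sketch of how such a wrapper
could look, assuming struct zsdesc is layout-compatible with struct page
and converted via a zsdesc_page() helper (the struct definition and the
zsdesc_page() name below are assumptions for illustration, not taken from
this patch):

	#include <linux/highmem.h>
	#include <linux/mm_types.h>

	/* Assumption: zsdesc overlays struct page, as in the rest of the series. */
	struct zsdesc {
		struct page page;
	};

	/* Convert a zsdesc back to its backing struct page (assumed helper). */
	static inline struct page *zsdesc_page(struct zsdesc *zsdesc)
	{
		return &zsdesc->page;
	}

	/* Map the memory behind a zsdesc, mirroring kmap_atomic() on its page. */
	static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
	{
		return kmap_atomic(zsdesc_page(zsdesc));
	}

With a wrapper of this shape, the (struct zsdesc **) casts in
zs_map_object()/zs_unmap_object() only reinterpret the pointer type while
the array still refers to the same underlying pages, which is why they can
be dropped once those callers are converted in the next patch.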