Convert obj_to_location() to take zsdesc and also convert its users to
use zsdesc.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 75 ++++++++++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 37 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index be3b8734bdf2..f5a20c20ec19 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -864,16 +864,16 @@ static __maybe_unused struct zsdesc *get_next_zsdesc(struct zsdesc *zsdesc)
 }
 
 /**
- * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * obj_to_location - get (<zsdesc>, <obj_idx>) from encoded object value
  * @obj: the encoded object value
- * @page: page object resides in zspage
+ * @zsdesc: zsdesc object resides in zspage
  * @obj_idx: object index
  */
-static void obj_to_location(unsigned long obj, struct page **page,
+static void obj_to_location(unsigned long obj, struct zsdesc **zsdesc,
 				unsigned int *obj_idx)
 {
 	obj >>= OBJ_TAG_BITS;
-	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+	*zsdesc = pfn_zsdesc(obj >> OBJ_INDEX_BITS);
 	*obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
@@ -1302,13 +1302,13 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 			enum zs_mapmode mm)
 {
 	struct zspage *zspage;
-	struct page *page;
+	struct zsdesc *zsdesc;
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
 	struct size_class *class;
 	struct mapping_area *area;
-	struct page *pages[2];
+	struct zsdesc *zsdescs[2];
 	void *ret;
 
 	/*
@@ -1321,8 +1321,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	/* It guarantees it can get zspage from handle safely */
 	spin_lock(&pool->lock);
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &page, &obj_idx);
-	zspage = get_zspage(page);
+	obj_to_location(obj, &zsdesc, &obj_idx);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 
 	/*
 	 * migration cannot move any zpages in this zspage. Here, pool->lock
@@ -1341,17 +1341,17 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
-		area->vm_addr = kmap_atomic(page);
+		area->vm_addr = zsdesc_kmap_atomic(zsdesc);
 		ret = area->vm_addr + off;
 		goto out;
 	}
 
 	/* this object spans two pages */
-	pages[0] = page;
-	pages[1] = get_next_page(page);
-	BUG_ON(!pages[1]);
+	zsdescs[0] = zsdesc;
+	zsdescs[1] = get_next_zsdesc(zsdesc);
+	BUG_ON(!zsdescs[1]);
 
-	ret = __zs_map_object(area, (struct zsdesc **)pages, off, class->size);
+	ret = __zs_map_object(area, zsdescs, off, class->size);
 out:
 	if (likely(!ZsHugePage(zspage)))
 		ret += ZS_HANDLE_SIZE;
@@ -1363,7 +1363,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
 	struct zspage *zspage;
-	struct page *page;
+	struct zsdesc *zsdesc;
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
@@ -1371,8 +1371,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	struct mapping_area *area;
 
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &page, &obj_idx);
-	zspage = get_zspage(page);
+	obj_to_location(obj, &zsdesc, &obj_idx);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
 
@@ -1380,13 +1380,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	if (off + class->size <= PAGE_SIZE)
 		kunmap_atomic(area->vm_addr);
 	else {
-		struct page *pages[2];
+		struct zsdesc *zsdescs[2];
 
-		pages[0] = page;
-		pages[1] = get_next_page(page);
-		BUG_ON(!pages[1]);
+		zsdescs[0] = zsdesc;
+		zsdescs[1] = get_next_zsdesc(zsdesc);
+		BUG_ON(!zsdescs[1]);
 
-		__zs_unmap_object(area, (struct zsdesc **)pages, off, class->size);
+		__zs_unmap_object(area, zsdescs, off, class->size);
 	}
 	local_unlock(&zs_map_area.lock);
 
@@ -1528,23 +1528,24 @@ static void obj_free(int class_size, unsigned long obj)
 {
 	struct link_free *link;
 	struct zspage *zspage;
-	struct page *f_page;
+	struct zsdesc *f_zsdesc;
 	unsigned long f_offset;
 	unsigned int f_objidx;
 	void *vaddr;
 
-	obj_to_location(obj, &f_page, &f_objidx);
+
+	obj_to_location(obj, &f_zsdesc, &f_objidx);
 	f_offset = offset_in_page(class_size * f_objidx);
-	zspage = get_zspage(f_page);
+	zspage = get_zspage(zsdesc_page(f_zsdesc));
 
-	vaddr = kmap_atomic(f_page);
+	vaddr = zsdesc_kmap_atomic(f_zsdesc);
 	link = (struct link_free *)(vaddr + f_offset);
 
 	/* Insert this object in containing zspage's freelist */
 	if (likely(!ZsHugePage(zspage)))
 		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
 	else
-		f_page->index = 0;
+		f_zsdesc->next = NULL;
 	set_freeobj(zspage, f_objidx);
 
 	kunmap_atomic(vaddr);
@@ -1587,7 +1588,7 @@ EXPORT_SYMBOL_GPL(zs_free);
 static void zs_object_copy(struct size_class *class, unsigned long dst,
 				unsigned long src)
 {
-	struct page *s_page, *d_page;
+	struct zsdesc *s_zsdesc, *d_zsdesc;
 	unsigned int s_objidx, d_objidx;
 	unsigned long s_off, d_off;
 	void *s_addr, *d_addr;
@@ -1596,8 +1597,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 
 	s_size = d_size = class->size;
 
-	obj_to_location(src, &s_page, &s_objidx);
-	obj_to_location(dst, &d_page, &d_objidx);
+	obj_to_location(src, &s_zsdesc, &s_objidx);
+	obj_to_location(dst, &d_zsdesc, &d_objidx);
 
 	s_off = offset_in_page(class->size * s_objidx);
 	d_off = offset_in_page(class->size * d_objidx);
@@ -1608,8 +1609,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 	if (d_off + class->size > PAGE_SIZE)
 		d_size = PAGE_SIZE - d_off;
 
-	s_addr = kmap_atomic(s_page);
-	d_addr = kmap_atomic(d_page);
+	s_addr = zsdesc_kmap_atomic(s_zsdesc);
+	d_addr = zsdesc_kmap_atomic(d_zsdesc);
 
 	while (1) {
 		size = min(s_size, d_size);
@@ -1634,17 +1635,17 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 		if (s_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
 			kunmap_atomic(s_addr);
-			s_page = get_next_page(s_page);
-			s_addr = kmap_atomic(s_page);
-			d_addr = kmap_atomic(d_page);
+			s_zsdesc = get_next_zsdesc(s_zsdesc);
+			s_addr = zsdesc_kmap_atomic(s_zsdesc);
+			d_addr = zsdesc_kmap_atomic(d_zsdesc);
 			s_size = class->size - written;
 			s_off = 0;
 		}
 
 		if (d_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
-			d_page = get_next_page(d_page);
-			d_addr = kmap_atomic(d_page);
+			d_zsdesc = get_next_zsdesc(d_zsdesc);
+			d_addr = zsdesc_kmap_atomic(d_zsdesc);
 			d_size = class->size - written;
 			d_off = 0;
 		}
@@ -1910,7 +1911,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	struct zs_pool *pool;
 	struct size_class *class;
 	struct zspage *zspage;
-	struct page *dummy;
+	struct zsdesc *dummy;
 	void *s_addr, *d_addr, *addr;
 	unsigned int offset;
 	unsigned long handle;
-- 
2.39.3
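
Note: the zsdesc helpers this patch relies on -- pfn_zsdesc(), zsdesc_page(),
zsdesc_kmap_atomic() and get_next_zsdesc() -- are introduced by earlier
patches in the series and are not part of the diff above. As a rough sketch
only (the real definitions live in those earlier patches and may differ), they
can be read roughly like the following, assuming struct zsdesc is at this
point still a typed overlay of struct page:

/*
 * Sketch only, not part of this patch.  Assumes struct zsdesc overlays
 * struct page, so each wrapper is a cast or a trivial forward to the
 * corresponding page-based helper.
 */
static inline struct zsdesc *page_zsdesc(struct page *page)
{
	return (struct zsdesc *)page;
}

static inline struct page *zsdesc_page(struct zsdesc *zsdesc)
{
	return (struct page *)zsdesc;
}

/* obj_to_location() decodes a pfn; map it straight to a zsdesc */
static inline struct zsdesc *pfn_zsdesc(unsigned long pfn)
{
	return page_zsdesc(pfn_to_page(pfn));
}

/* kmap_atomic() on the page backing this zsdesc */
static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
{
	return kmap_atomic(zsdesc_page(zsdesc));
}

/* next subpage of the zspage, mirroring get_next_page() */
static inline struct zsdesc *get_next_zsdesc(struct zsdesc *zsdesc)
{
	return page_zsdesc(get_next_page(zsdesc_page(zsdesc)));
}

Keeping the wrappers this thin means the conversion in zs_map_object(),
zs_unmap_object(), obj_free() and zs_object_copy() is purely a type change
for now, so a later switch of struct zsdesc to a separately allocated memory
descriptor should not require touching obj_to_location() or its callers again.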