On (22/11/18 16:15), Nhat Pham wrote:
> +static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries)
> +{
> +	int i, obj_idx, ret = 0;
> +	unsigned long handle;
> +	struct zspage *zspage;
> +	struct page *page;
> +	enum fullness_group fullness;
> +
> +	/* Lock LRU and fullness list */
> +	spin_lock(&pool->lock);
> +	if (list_empty(&pool->lru)) {
> +		spin_unlock(&pool->lock);
> +		return -EINVAL;
> +	}
> +
> +	for (i = 0; i < retries; i++) {
> +		struct size_class *class;
> +
> +		zspage = list_last_entry(&pool->lru, struct zspage, lru);
> +		list_del(&zspage->lru);
> +
> +		/* zs_free may free objects, but not the zspage and handles */
> +		zspage->under_reclaim = true;
> +
> +		class = zspage_class(pool, zspage);
> +		fullness = get_fullness_group(class, zspage);
> +
> +		/* Lock out object allocations and object compaction */
> +		remove_zspage(class, zspage, fullness);
> +
> +		spin_unlock(&pool->lock);
> +
> +		/* Lock backing pages into place */
> +		lock_zspage(zspage);
> +
> +		obj_idx = 0;
> +		page = zspage->first_page;

A nit: we usually call get_first_page() in such cases; a sketch of what I mean is below, after the quoted hunk.

> +		while (1) {
> +			handle = find_alloced_obj(class, page, &obj_idx);
> +			if (!handle) {
> +				page = get_next_page(page);
> +				if (!page)
> +					break;
> +				obj_idx = 0;
> +				continue;
> +			}
> +
> +			/*
> +			 * This will write the object and call zs_free.
> +			 *
> +			 * zs_free will free the object, but the
> +			 * under_reclaim flag prevents it from freeing
> +			 * the zspage altogether. This is necessary so
> +			 * that we can continue working with the
> +			 * zspage potentially after the last object
> +			 * has been freed.
> +			 */
> +			ret = pool->zpool_ops->evict(pool->zpool, handle);
> +			if (ret)
> +				goto next;
> +
> +			obj_idx++;
> +		}
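
On the nit above, something along these lines is what I had in mind (untested, assuming the existing get_first_page() helper in zsmalloc):

		/* use the accessor instead of touching ->first_page directly */
		obj_idx = 0;
		page = get_first_page(zspage);

IIRC get_first_page() also carries the is_first_page() sanity check, so we get that assertion for free rather than dereferencing zspage->first_page by hand.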