Currently zsmalloc reuses fields of struct page. As part of simplifying struct page, create own type for zsmalloc, called zsdesc. Remove comments about how zsmalloc reuses fields of struct page, because zsdesc uses more intuitive names. Note that zsmalloc does not use PG_owner_priv_1 after commit a41ec880aa7b ("zsmalloc: move huge compressed obj from page to zspage"). Thus only document how zsmalloc uses PG_private flag. It is very tempting to rearrange zsdesc, but the three words after flags field are not available for zsmalloc. Add comments about that. Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx> --- mm/zsmalloc.c | 63 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 3aed46ab7e6c..e2e34992c439 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -11,23 +11,6 @@ * Released under the terms of GNU General Public License Version 2.0 */ -/* - * Following is how we use various fields and flags of underlying - * struct page(s) to form a zspage. - * - * Usage of struct page fields: - * page->private: points to zspage - * page->index: links together all component pages of a zspage - * For the huge page, this is always 0, so we use this field - * to store handle. - * page->page_type: first object offset in a subpage of zspage - * - * Usage of struct page flags: - * PG_private: identifies the first component page - * PG_owner_priv_1: identifies the huge component page - * - */ - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* @@ -303,6 +286,52 @@ struct mapping_area { enum zs_mapmode vm_mm; /* mapping mode */ }; +/* + * struct zsdesc - memory descriptor for zsmalloc memory + * + * This struct overlays struct page for now. Do not modify without a + * good understanding of the issues. 
+ * + * Usage of struct page flags on zsdesc: + * PG_private: identifies the first component zsdesc + */ +struct zsdesc { + unsigned long flags; + + /* + * Although not used by zsmalloc, this field is used by non-LRU page migration + * code. Leave it unused. + */ + struct list_head lru; + + /* Always points to zsmalloc_mops with PAGE_MAPPING_MOVABLE set */ + struct movable_operations *mops; + + union { + /* linked list of all zsdescs in a zspage */ + struct zsdesc *next; + /* for huge zspages */ + unsigned long handle; + }; + struct zspage *zspage; + unsigned int first_obj_offset; + unsigned int _refcount; +}; + +#define ZSDESC_MATCH(pg, zs) \ + static_assert(offsetof(struct page, pg) == offsetof(struct zsdesc, zs)) + +ZSDESC_MATCH(flags, flags); +ZSDESC_MATCH(lru, lru); +ZSDESC_MATCH(mapping, mops); +ZSDESC_MATCH(index, next); +ZSDESC_MATCH(index, handle); +ZSDESC_MATCH(private, zspage); +ZSDESC_MATCH(page_type, first_obj_offset); +ZSDESC_MATCH(_refcount, _refcount); +#undef ZSDESC_MATCH +static_assert(sizeof(struct zsdesc) <= sizeof(struct page)); + /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ static void SetZsHugePage(struct zspage *zspage) { -- 2.25.1