This is a note to let you know that I've just added the patch titled

    mm: add private field of first tail to struct page and struct folio

to the 6.1-stable tree which can be found at:

	http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mm-add-private-field-of-first-tail-to-struct-page-an.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 7ecde6b7accded68174f807406cd1043927150c6
Author: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
Date:   Thu Sep 22 10:42:04 2022 -0500

    mm: add private field of first tail to struct page and struct folio

    [ Upstream commit d340625f4849ab5dbfebbc7d84709fbfcd39e52f ]

    Allow struct folio to store hugetlb metadata that is contained in the
    private field of the first tail page.  On 32-bit, _private_1 aligns
    with page[1].private.

    Link: https://lkml.kernel.org/r/20220922154207.1575343-3-sidhartha.kumar@xxxxxxxxxx
    Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
    Acked-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
    Cc: Arnd Bergmann <arnd@xxxxxxxx>
    Cc: Colin Cross <ccross@xxxxxxxxxx>
    Cc: David Howells <dhowells@xxxxxxxxxx>
    Cc: "Eric W . Biederman" <ebiederm@xxxxxxxxxxxx>
    Cc: Hugh Dickins <hughd@xxxxxxxxxx>
    Cc: kernel test robot <lkp@xxxxxxxxx>
    Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
    Cc: Muchun Song <songmuchun@xxxxxxxxxxxxx>
    Cc: Peter Xu <peterx@xxxxxxxxxx>
    Cc: Vlastimil Babka <vbabka@xxxxxxx>
    Cc: William Kucharski <william.kucharski@xxxxxxxxxx>
    Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
    Stable-dep-of: b76b46902c2d ("mm/hugetlb: fix missing hugetlb_lock for resv uncharge")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 247aedb18d5c3..a9c1d611029d1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -144,6 +144,7 @@ struct page {
 			atomic_t compound_pincount;
 #ifdef CONFIG_64BIT
 			unsigned int compound_nr; /* 1 << compound_order */
+			unsigned long _private_1;
 #endif
 		};
 		struct {	/* Second tail page of compound page */
@@ -264,6 +265,7 @@ struct page {
  * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
  * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
  * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_private_1: Do not use directly, call folio_get_private_1().
  *
  * A folio is a physically, virtually and logically contiguous set
  * of bytes.  It is a power-of-two in size, and it is aligned to that
@@ -311,6 +313,7 @@ struct folio {
 #ifdef CONFIG_64BIT
 	unsigned int _folio_nr_pages;
 #endif
+	unsigned long _private_1;
 };
 
 #define FOLIO_MATCH(pg, fl)						\
@@ -338,6 +341,7 @@ FOLIO_MATCH(compound_mapcount, _total_mapcount);
 FOLIO_MATCH(compound_pincount, _pincount);
 #ifdef CONFIG_64BIT
 FOLIO_MATCH(compound_nr, _folio_nr_pages);
+FOLIO_MATCH(_private_1, _private_1);
 #endif
 #undef FOLIO_MATCH
 
@@ -383,6 +387,16 @@ static inline void *folio_get_private(struct folio *folio)
 	return folio->private;
 }
 
+static inline void folio_set_private_1(struct folio *folio, unsigned long private)
+{
+	folio->_private_1 = private;
+}
+
+static inline unsigned long folio_get_private_1(struct folio *folio)
+{
+	return folio->_private_1;
+}
+
 struct page_frag_cache {
 	void * va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
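
For context only, and not part of the queued patch: a minimal sketch of
how a caller could use the two new accessors. The commit message notes
this field carries hugetlb metadata kept in the first tail page's
private field (on 32-bit, _private_1 overlays page[1].private). The
function names below are hypothetical illustrations, not from this
series, and assume a compound page of order >= 1 so a first tail page
actually exists.

/*
 * Illustrative sketch only -- not from the patch. Stashes and reads a
 * word of per-folio metadata via the new helpers, the way hugetlb
 * keeps metadata in the first tail page's private field.
 */
static void demo_stash_cookie(struct folio *folio, unsigned long cookie)
{
	/* Writes folio->_private_1 in the first tail page. */
	folio_set_private_1(folio, cookie);
}

static unsigned long demo_read_cookie(struct folio *folio)
{
	return folio_get_private_1(folio);
}

The accessors are plain field wrappers with no locking or type
checking; as with folio->private, the owner of the folio defines what
the stored value means.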