On 09/08/22 12:37, Sidhartha Kumar wrote: > Allows struct folio to store hugetlb metadata that is contained in the > private field of the first tail page. On 32-bit, _private_1 aligns with > page[1].private. > > Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx> > --- > include/linux/mm_types.h | 14 ++++++++++++++ > 1 file changed, 14 insertions(+) This change is fine with me as it makes the following patches, which deal with the hugetlb page subpool, folio-aware. I 'think' Matthew is OK with this direction, but it would be good if he chimed in. Acked-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx> -- Mike Kravetz > > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h > index 8a9ee9d24973..7a33171a05b6 100644 > --- a/include/linux/mm_types.h > +++ b/include/linux/mm_types.h > @@ -143,6 +143,7 @@ struct page { > atomic_t compound_pincount; > #ifdef CONFIG_64BIT > unsigned int compound_nr; /* 1 << compound_order */ > + unsigned long _private_1; > #endif > }; > struct { /* Second tail page of compound page */ > @@ -251,6 +252,7 @@ struct page { > * @_total_mapcount: Do not use directly, call folio_entire_mapcount(). > * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). > * @_folio_nr_pages: Do not use directly, call folio_nr_pages(). > + * @_private_1: Do not use directly, call folio_get_private_1(). > * > * A folio is a physically, virtually and logically contiguous set > * of bytes. 
It is a power-of-two in size, and it is aligned to that > @@ -298,6 +300,7 @@ struct folio { > #ifdef CONFIG_64BIT > unsigned int _folio_nr_pages; > #endif > + unsigned long _private_1; > }; > > #define FOLIO_MATCH(pg, fl) \ > @@ -325,6 +328,7 @@ FOLIO_MATCH(compound_mapcount, _total_mapcount); > FOLIO_MATCH(compound_pincount, _pincount); > #ifdef CONFIG_64BIT > FOLIO_MATCH(compound_nr, _folio_nr_pages); > +FOLIO_MATCH(_private_1, _private_1); > #endif > #undef FOLIO_MATCH > > @@ -370,6 +374,16 @@ static inline void *folio_get_private(struct folio *folio) > return folio->private; > } > > +static inline void folio_set_private_1(struct folio *folio, unsigned long private) > +{ > + folio->_private_1 = private; > +} > + > +static inline unsigned long folio_get_private_1(struct folio *folio) > +{ > + return folio->_private_1; > +} > + > struct page_frag_cache { > void * va; > #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) > -- > 2.31.1 >