On Fri, 16 Apr 2021 16:27:55 +0100
Matthew Wilcox <willy@xxxxxxxxxxxxx> wrote:

> On Thu, Apr 15, 2021 at 08:08:32PM +0200, Jesper Dangaard Brouer wrote:
> > See below patch. Where I swap32 the dma address to satisfy
> > page->compound having bit zero cleared. (It is the simplest fix I could
> > come up with).
>
> I think this is slightly simpler, and as a bonus code that assumes the
> old layout won't compile.

This is clever, I like it! When reading the code, one just has to keep in
mind the size difference of 'unsigned long' between 64-bit and 32-bit
architectures. And I assume the compiler can optimize the sizeof() check
away, so this should be doable.

> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 6613b26a8894..5aacc1c10a45 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -97,10 +97,10 @@ struct page {
>  		};
>  		struct {	/* page_pool used by netstack */
>  			/**
> -			 * @dma_addr: might require a 64-bit value even on
> +			 * @dma_addr: might require a 64-bit value on
>  			 * 32-bit architectures.
>  			 */
> -			dma_addr_t dma_addr;
> +			unsigned long dma_addr[2];
>  		};
>  		struct {	/* slab, slob and slub */
>  			union {
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index b5b195305346..db7c7020746a 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
>  
>  static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
>  {
> -	return page->dma_addr;
> +	dma_addr_t ret = page->dma_addr[0];
> +	if (sizeof(dma_addr_t) > sizeof(unsigned long))
> +		ret |= (dma_addr_t)page->dma_addr[1] << 32;
> +	return ret;
> +}
> +
> +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> +{
> +	page->dma_addr[0] = addr;
> +	if (sizeof(dma_addr_t) > sizeof(unsigned long))
> +		page->dma_addr[1] = addr >> 32;
>  }
>  
>  static inline bool is_page_pool_compiled_in(void)
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index ad8b0707af04..f014fd8c19a6 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
>  					  struct page *page,
>  					  unsigned int dma_sync_size)
>  {
> +	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
> +
>  	dma_sync_size = min(dma_sync_size, pool->p.max_len);
> -	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
> +	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
>  					 pool->p.offset, dma_sync_size,
>  					 pool->p.dma_dir);
>  }
> @@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  		put_page(page);
>  		return NULL;
>  	}
> -	page->dma_addr = dma;
> +	page_pool_set_dma_addr(page, dma);
>  
>  	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
>  		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> @@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
>  		 */
>  		goto skip_dma_unmap;
>  
> -	dma = page->dma_addr;
> +	dma = page_pool_get_dma_addr(page);
>  
> -	/* When page is unmapped, it cannot be returned our pool */
> +	/* When page is unmapped, it cannot be returned to our pool */
>  	dma_unmap_page_attrs(pool->p.dev, dma,
>  			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
>  			     DMA_ATTR_SKIP_CPU_SYNC);
> -	page->dma_addr = 0;
> +	page_pool_set_dma_addr(page, 0);
>  skip_dma_unmap:
>  	/* This may be the last page returned, releasing the pool, so
>  	 * it is not safe to reference pool afterwards.
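To convince myself the sizeof() check really does fold away, here is a
minimal userspace sketch of the same split-word trick. This is just my
illustration, not part of the patch: 'struct fake_page', the dma_addr_t
typedef and the helper names are stand-ins for the real kernel types.

/* Standalone sketch of storing a possibly-64-bit DMA address in two
 * unsigned longs, mirroring page_pool_{get,set}_dma_addr() above.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* pretend 64-bit DMA addresses */

struct fake_page {
	unsigned long dma_addr[2];	/* stand-in for the struct page field */
};

static inline dma_addr_t get_dma_addr(const struct fake_page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	/* sizeof() is a compile-time constant: on 64-bit builds this
	 * branch is dead code and the compiler drops it; on 32-bit
	 * builds with a 64-bit dma_addr_t the high word is merged in.
	 */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 32;
	return ret;
}

static inline void set_dma_addr(struct fake_page *page, dma_addr_t addr)
{
	page->dma_addr[0] = (unsigned long)addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = addr >> 32;
}

int main(void)
{
	struct fake_page page;

	set_dma_addr(&page, 0x123456789abcdef0ULL);
	printf("round-trip: 0x%llx\n",
	       (unsigned long long)get_dma_addr(&page));
	return 0;
}

Either way the round-trip prints the full 64-bit value: on a 64-bit
build the whole address fits in dma_addr[0] and the helpers reduce to a
plain load/store, while a 32-bit build splits and rejoins the two words.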
-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer