Sorry Dave, sure, it was tested on Intel Sky Lake-E CBDMA.

-----Original Message-----
From: Dave Jiang <dave.jiang@xxxxxxxxx>
Sent: Thursday, April 2, 2020 7:43 PM
To: Ravich, Leonid; dmaengine@xxxxxxxxxxxxxxx
Cc: lravich@xxxxxxxxx; Vinod Koul; Williams, Dan J; Greg Kroah-Hartman; Zavras, Alexios; Barabash, Alexander; Thomas Gleixner; Kate Stewart; Jilayne Lovejoy; Logan Gunthorpe; linux-kernel@xxxxxxxxxxxxxxx
Subject: Re: [PATCH v2 1/2] dmaengine: ioat: fixing chunk sizing macros dependency

[EXTERNAL EMAIL]

On 4/2/2020 9:33 AM, leonid.ravich@xxxxxxxx wrote:
> From: Leonid Ravich <Leonid.Ravich@xxxxxxx>
>
> prepare for changing alloc size.
>
> Acked-by: Dave Jiang <dave.jiang@xxxxxxxxx>
> Signed-off-by: Leonid Ravich <Leonid.Ravich@xxxxxxx>

Hi Leonid,
I haven't actually acked this patch yet, pending your answer on whether this has been tested on hardware. Thanks.

> ---
>  drivers/dma/ioat/dma.c  | 14 ++++++++------
>  drivers/dma/ioat/dma.h  | 10 ++++++----
>  drivers/dma/ioat/init.c |  2 +-
>  3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
> index 18c011e..1e0e6c1 100644
> --- a/drivers/dma/ioat/dma.c
> +++ b/drivers/dma/ioat/dma.c
> @@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
>  	u8 *pos;
>  	off_t offs;
>
> -	chunk = idx / IOAT_DESCS_PER_2M;
> -	idx &= (IOAT_DESCS_PER_2M - 1);
> +	chunk = idx / IOAT_DESCS_PER_CHUNK;
> +	idx &= (IOAT_DESCS_PER_CHUNK - 1);
>  	offs = idx * IOAT_DESC_SZ;
>  	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
>  	phys = ioat_chan->descs[chunk].hw + offs;
> @@ -370,7 +370,8 @@ struct ioat_ring_ent **
>  	if (!ring)
>  		return NULL;
>
> -	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
> +	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
> +	ioat_chan->desc_chunks = chunks;
>
>  	for (i = 0; i < chunks; i++) {
>  		struct ioat_descs *descs = &ioat_chan->descs[i];
> @@ -382,8 +383,9 @@ struct ioat_ring_ent **
>
>  		for (idx = 0; idx < i; idx++) {
>  			descs = &ioat_chan->descs[idx];
> -			dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> -					  descs->virt, descs->hw);
> +			dma_free_coherent(to_dev(ioat_chan),
> +					  IOAT_CHUNK_SIZE,
> +					  descs->virt, descs->hw);
>  			descs->virt = NULL;
>  			descs->hw = 0;
>  		}
> @@ -404,7 +406,7 @@ struct ioat_ring_ent **
>
>  	for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
>  		dma_free_coherent(to_dev(ioat_chan),
> -				  SZ_2M,
> +				  IOAT_CHUNK_SIZE,
>  				  ioat_chan->descs[idx].virt,
>  				  ioat_chan->descs[idx].hw);
>  		ioat_chan->descs[idx].virt = NULL;
> diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> index b8e8e0b..5216c6b 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -81,6 +81,11 @@ struct ioatdma_device {
>  	u32 msixpba;
>  };
>
> +#define IOAT_MAX_ORDER 16
> +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
> +#define IOAT_CHUNK_SIZE (SZ_2M)
> +#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
> +
>  struct ioat_descs {
>  	void *virt;
>  	dma_addr_t hw;
> @@ -128,7 +133,7 @@ struct ioatdma_chan {
>  	u16 produce;
>  	struct ioat_ring_ent **ring;
>  	spinlock_t prep_lock;
> -	struct ioat_descs descs[2];
> +	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
>  	int desc_chunks;
>  	int intr_coalesce;
>  	int prev_intr_coalesce;
> @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
>  	return !!err;
>  }
>
> -#define IOAT_MAX_ORDER 16
> -#define IOAT_MAX_DESCS 65536
> -#define IOAT_DESCS_PER_2M 32768
>
>  static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
>  {
> diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
> index 60e9afb..58d1356 100644
> --- a/drivers/dma/ioat/init.c
> +++ b/drivers/dma/ioat/init.c
> @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
>  	}
>
>  	for (i = 0; i < ioat_chan->desc_chunks; i++) {
> -		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> +		dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
>  				  ioat_chan->descs[i].virt,
>  				  ioat_chan->descs[i].hw);
>  		ioat_chan->descs[i].virt = NULL;
>
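For reference, here is a minimal stand-alone sketch (illustration only, not part of the patch) of how the new macros fit together. It assumes IOAT_DESC_SZ is 64 bytes, which is consistent with the old IOAT_DESCS_PER_2M value of 32768, and shows how the descs[] array bound falls out of IOAT_MAX_DESCS and IOAT_DESCS_PER_CHUNK:

/*
 * Sketch (not driver code): derive the per-chunk descriptor count and
 * the number of chunk slots a channel needs, as the patch does.
 */
#include <stdio.h>

#define SZ_2M                 (2 * 1024 * 1024)
#define IOAT_DESC_SZ          64                    /* assumed descriptor size */
#define IOAT_MAX_ORDER        16
#define IOAT_MAX_DESCS        (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE       (SZ_2M)
#define IOAT_DESCS_PER_CHUNK  (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)

int main(void)
{
	/* Number of chunk slots per channel, i.e. the new descs[] array bound. */
	int nr_chunks = IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK;

	printf("descs per chunk: %d\n", IOAT_DESCS_PER_CHUNK); /* 32768 */
	printf("chunk slots:     %d\n", nr_chunks);            /* 2 */
	return 0;
}

With the current 2M chunk size this still works out to two chunks per channel, matching the old hard-coded descs[2]; the point of the macros is that changing IOAT_CHUNK_SIZE later adjusts everything else automatically.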