On Tue, 2016-07-12 at 11:57 +0000, Lars-Peter Clausen wrote:
> On 07/12/2016 01:45 AM, Dave Jiang wrote:
> > Adding error reporting infrastructure through the DMA cookie. An error
> > status will be set in the cookie if the descriptor is returned from the
> > driver as failed, in order to notify the upper layer of the status.
> > 
> > Signed-off-by: Dave Jiang <dave.jiang@xxxxxxxxx>
> 
> Hi,
> 
> Thanks for working on this. But I'm afraid that this particular approach
> will not work. The idea behind the cookies is that they are stateless
> except for the cookie value itself and do not require any lifetime
> management. The cookie values are monotonically incrementing, which means
> you can easily check whether a transfer has completed by comparing its
> value to that of the last completed transfer. No additional state is
> required.
> 
> What you are doing in this patch is conceptually the same as what you did
> in the first version of your patch. You assign additional state as well
> as lifetime management to the cookie embedded in the
> dma_async_tx_descriptor struct. The lifetime of this new cookie struct is
> the same as that of the tx_descriptor it belongs to, though, and all your
> interfaces work on a struct dma_async_tx_descriptor as well. So in
> essence this is no different from embedding the error code in the
> tx_descriptor itself. In addition, this patch series, and this patch in
> particular, introduces a fair number of memory leaks, since not everybody
> calls dma_cookie_free().
> 
> So the same review points from version 1 of the patch still apply.
> 
> My personal preference for error reporting is to add a status field to
> the completion callback. This status struct should contain error
> conditions, but also other status information such as the number of bytes
> transferred for the descriptor.

Thanks for the feedback. That sounds reasonable. Let me work on that. So to
clarify, what you are saying is to modify this callback [1] to take an
additional reporting struct:

typedef void (*dma_async_tx_callback)(void *dma_async_param,
                                      struct dma_result *result);

And then let the upper layer allocate the memory for that report struct and
provide it to the DMA driver to store the results in?

[1]: http://lxr.free-electrons.com/source/include/linux/dmaengine.h#L44
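
To make that concrete, here is a rough sketch of what such an interface
could look like. Everything below is hypothetical and only extrapolates
from the suggestion above: struct dma_result, its fields, and the reuse of
the error enum proposed in the patch quoted below are guesses, not existing
dmaengine API.

#include <linux/types.h>

/* Error codes could reuse the enum proposed in the patch quoted below. */
enum dma_trans_error {
        DMA_TRANS_NOERROR = 0,
        DMA_TRANS_READ_FAILED,
        DMA_TRANS_WRITE_FAILED,
        DMA_TRANS_ABORTED,
};

/*
 * Completion report filled in by the DMA driver; the memory would be
 * owned by the client that submitted the descriptor.
 */
struct dma_result {
        enum dma_trans_error error;     /* what, if anything, went wrong */
        u32 residue;                    /* bytes left untransferred */
};

/* The existing callback type, extended with a result argument. */
typedef void (*dma_async_tx_callback)(void *dma_async_param,
                                      struct dma_result *result);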

> - Lars

> > ---
> >  drivers/dma/dmaengine.c   |  4 ++--
> >  drivers/dma/dmaengine.h   | 38 +++++++++++++++++++++++++++++---------
> >  drivers/dma/hsu/hsu.c     |  2 +-
> >  drivers/dma/ioat/dma.h    |  2 +-
> >  drivers/dma/virt-dma.c    |  4 ++--
> >  drivers/dma/virt-dma.h    |  4 ++--
> >  include/linux/dmaengine.h | 31 ++++++++++++++++++++++++++++++-
> >  7 files changed, 67 insertions(+), 18 deletions(-)
> > 
> > diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
> > index 8c9f45f..21fd715 100644
> > --- a/drivers/dma/dmaengine.c
> > +++ b/drivers/dma/dmaengine.c
> > @@ -1224,7 +1224,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
> >  	if (!tx)
> >  		return DMA_COMPLETE;
> >  
> > -	while (tx->cookie == -EBUSY) {
> > +	while (tx->cookie->cookie == -EBUSY) {
> >  		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
> >  			dev_err(tx->chan->device->dev,
> >  				"%s timeout waiting for descriptor submission\n",
> > @@ -1233,7 +1233,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
> >  		}
> >  		cpu_relax();
> >  	}
> > -	return dma_sync_wait(tx->chan, tx->cookie);
> > +	return dma_sync_wait(tx->chan, tx->cookie->cookie);
> >  }
> >  EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
> > 
> > diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
> > index 17f983a..048aa81 100644
> > --- a/drivers/dma/dmaengine.h
> > +++ b/drivers/dma/dmaengine.h
> > @@ -6,6 +6,7 @@
> >  #define DMAENGINE_H
> >  
> >  #include <linux/bug.h>
> > +#include <linux/slab.h>
> >  #include <linux/dmaengine.h>
> >  
> >  /**
> > @@ -28,14 +29,19 @@ static inline void dma_cookie_init(struct dma_chan *chan)
> >  static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
> >  {
> >  	struct dma_chan *chan = tx->chan;
> > -	dma_cookie_t cookie;
> > +	struct dma_cookie *cookie;
> >  
> > -	cookie = chan->cookie + 1;
> > -	if (cookie < DMA_MIN_COOKIE)
> > -		cookie = DMA_MIN_COOKIE;
> > -	tx->cookie = chan->cookie = cookie;
> > +	cookie = kzalloc(sizeof(struct dma_cookie), GFP_KERNEL);
> > +	if (!cookie)
> > +		return -ENOMEM;
> >  
> > -	return cookie;
> > +	cookie->cookie = chan->cookie + 1;
> > +	if (cookie->cookie < DMA_MIN_COOKIE)
> > +		cookie->cookie = DMA_MIN_COOKIE;
> > +	tx->cookie->cookie = chan->cookie;
> > +	tx->cookie = cookie;
> > +
> > +	return cookie->cookie;
> >  }
> >  
> >  /**
> > @@ -50,9 +56,23 @@ static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
> >   */
> >  static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
> >  {
> > -	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
> > -	tx->chan->completed_cookie = tx->cookie;
> > -	tx->cookie = 0;
> > +	BUG_ON(tx->cookie->cookie < DMA_MIN_COOKIE);
> > +	tx->chan->completed_cookie = tx->cookie->cookie;
> > +	tx->cookie->cookie = 0;
> > +}
> > +
> > +/**
> > + * dma_cookie_free - free the dma cookie
> > + * @tx: descriptor that points to the cookie
> > + *
> > + * Free the allocated dma_cookie
> > + */
> > +static inline void dma_cookie_free(struct dma_async_tx_descriptor *tx)
> > +{
> > +	if (tx->cookie) {
> > +		kfree(tx->cookie);
> > +		tx->cookie = NULL;
> > +	}
> >  }
> >  
> >  /**
> > diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
> > index f8c5cd5..50e714a 100644
> > --- a/drivers/dma/hsu/hsu.c
> > +++ b/drivers/dma/hsu/hsu.c
> > @@ -283,7 +283,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
> >  
> >  	spin_lock_irqsave(&hsuc->vchan.lock, flags);
> >  	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
> > -	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
> > +	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie->cookie) {
> >  		bytes = hsu_dma_active_desc_size(hsuc);
> >  		dma_set_residue(state, bytes);
> >  		status = hsuc->desc->status;
> > 
> > diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> > index a9bc1a1..d73acec 100644
> > --- a/drivers/dma/ioat/dma.h
> > +++ b/drivers/dma/ioat/dma.h
> > @@ -231,7 +231,7 @@ __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
> >  	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
> >  		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
> >  		(unsigned long long) tx->phys,
> > -		(unsigned long long) hw->next, tx->cookie, tx->flags,
> > +		(unsigned long long) hw->next, tx->cookie->cookie, tx->flags,
> >  		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
> >  }
> > 
> > diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
> > index a35c211..9487873 100644
> > --- a/drivers/dma/virt-dma.c
> > +++ b/drivers/dma/virt-dma.c
> > @@ -60,7 +60,7 @@ int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
> >  	spin_unlock_irqrestore(&vc->lock, flags);
> >  
> >  	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
> > -		vc, vd, vd->tx.cookie);
> > +		vc, vd, vd->tx.cookie->cookie);
> >  	vc->desc_free(vd);
> >  	return 0;
> >  }
> > @@ -72,7 +72,7 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
> >  	struct virt_dma_desc *vd;
> >  
> >  	list_for_each_entry(vd, &vc->desc_issued, node)
> > -		if (vd->tx.cookie == cookie)
> > +		if (vd->tx.cookie->cookie == cookie)
> >  			return vd;
> >  
> >  	return NULL;
> > 
> > diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
> > index d9731ca..c82e90a 100644
> > --- a/drivers/dma/virt-dma.h
> > +++ b/drivers/dma/virt-dma.h
> > @@ -92,12 +92,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
> >  static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
> >  {
> >  	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
> > -	dma_cookie_t cookie;
> > +	struct dma_cookie *cookie;
> >  
> >  	cookie = vd->tx.cookie;
> >  	dma_cookie_complete(&vd->tx);
> >  	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
> > -		vd, cookie);
> > +		vd, cookie->cookie);
> >  	list_add_tail(&vd->node, &vc->desc_completed);
> >  
> >  	tasklet_schedule(&vc->task);
> > 
> > diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
> > index 30de019..9fa47d4 100644
> > --- a/include/linux/dmaengine.h
> > +++ b/include/linux/dmaengine.h
> > @@ -34,6 +34,18 @@
> >  typedef s32 dma_cookie_t;
> >  #define DMA_MIN_COOKIE 1
> >  
> > +enum dma_trans_error {
> > +	DMA_TRANS_NOERROR = 0,
> > +	DMA_TRANS_READ_FAILED,
> > +	DMA_TRANS_WRITE_FAILED,
> > +	DMA_TRANS_ABORTED,
> > +};
> > +
> > +struct dma_cookie {
> > +	dma_cookie_t cookie;
> > +	enum dma_trans_error error;
> > +};
> > +
> >  static inline int dma_submit_error(dma_cookie_t cookie)
> >  {
> >  	return cookie < 0 ? cookie : 0;
> > @@ -471,7 +483,7 @@ struct dmaengine_unmap_data {
> >   * @lock: protect the parent and next pointers
> >   */
> >  struct dma_async_tx_descriptor {
> > -	dma_cookie_t cookie;
> > +	struct dma_cookie *cookie;
> >  	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
> >  	dma_addr_t phys;
> >  	struct dma_chan *chan;
> > @@ -781,6 +793,23 @@ struct dma_device {
> >  	void (*device_issue_pending)(struct dma_chan *chan);
> >  };
> >  
> > +static inline bool dma_trans_good(struct dma_async_tx_descriptor *tx)
> > +{
> > +	return (tx->cookie->cookie == DMA_TRANS_NOERROR) ? true : false;
> > +}
> > +
> > +static inline enum dma_trans_error
> > +dma_trans_status(struct dma_async_tx_descriptor *tx)
> > +{
> > +	return tx->cookie->error;
> > +}
> > +
> > +static inline void dma_trans_set_status(struct dma_async_tx_descriptor *tx,
> > +					enum dma_trans_error status)
> > +{
> > +	tx->cookie->error = status;
> > +}
> > +
> >  static inline int dmaengine_slave_config(struct dma_chan *chan,
> >  					 struct dma_slave_config *config)
> >  {
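
As a client-side illustration of the same idea: the upper layer allocates
the report structure, hands it to the driver along with the descriptor, and
inspects it from its callback. Again, this is only a hypothetical sketch;
struct dma_result, the extended callback signature, and struct my_ctx are
not existing dmaengine API.

#include <linux/completion.h>
#include <linux/printk.h>

struct my_ctx {
        struct completion done;
        struct dma_result result;       /* allocated by the upper layer */
};

/* Completion callback; 'result' would point at ctx->result. */
static void my_dma_callback(void *param, struct dma_result *result)
{
        struct my_ctx *ctx = param;

        if (result->error != DMA_TRANS_NOERROR)
                pr_err("DMA transfer failed: error %d, %u bytes not transferred\n",
                       result->error, result->residue);

        complete(&ctx->done);
}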