On Tue, Mar 17, 2015 at 12:46:12AM -0500, Andy Gross wrote:
> +static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
> +	struct dma_tx_state *txstate)
> +{
> +	struct adm_chan *achan = to_adm_chan(chan);
> +	struct virt_dma_desc *vd;
> +	enum dma_status ret;
> +	unsigned long flags;
> +	size_t residue = 0;
> +
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	if (ret == DMA_COMPLETE || !txstate)
> +		return ret;
> +
> +	spin_lock_irqsave(&achan->vc.lock, flags);
> +
> +	vd = vchan_find_desc(&achan->vc, cookie);
> +	if (vd)
> +		residue = container_of(vd, struct adm_async_desc, vd)->length;
> +
> +	spin_unlock_irqrestore(&achan->vc.lock, flags);
> +
> +	/*
> +	 * residue is either the full length if it is in the issued list, or 0
> +	 * if it is in progress. We have no reliable way of determining
> +	 * anything in between
> +	 */
> +	dma_set_residue(txstate, residue);
> +
> +	if (achan->error)
> +		return DMA_ERROR;

But this error may not be for the descriptor being queried, right? The cookie could refer to one that is still queued.

--
~Vinod
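
[Editorial note: a minimal sketch of the distinction the review question points at, i.e. only reporting DMA_ERROR when the queried cookie belongs to the descriptor that was actually on the hardware. The achan->curr_txd field is an assumption, not taken from the quoted patch; the rest is the standard dmaengine/virt-dma API.]

static enum dma_status adm_tx_status_sketch(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&achan->vc.lock, flags);

	vd = vchan_find_desc(&achan->vc, cookie);
	if (vd) {
		/* Still on the issued list: nothing transferred yet. */
		residue = container_of(vd, struct adm_async_desc, vd)->length;
	} else if (achan->curr_txd &&			/* assumed field */
		   achan->curr_txd->vd.tx.cookie == cookie) {
		/*
		 * Only when the cookie matches the descriptor currently on
		 * the hardware does the channel-wide error flag apply to it.
		 */
		if (achan->error)
			ret = DMA_ERROR;
	}

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	return ret;
}

The point being that achan->error is a channel-wide flag, so returning DMA_ERROR unconditionally reports the failure against whatever cookie the client happens to query, including descriptors that are still queued.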