As reported by Aaro, the JZ4740 MMC driver throws a warning when the
kernel is built without preemption (CONFIG_PREEMPT_NONE=y).

[   16.461094] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 567 host->next_data.cookie 568
[   16.473120] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 568 host->next_data.cookie 569
[   16.485144] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 569 host->next_data.cookie 570
[   16.497170] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 570 host->next_data.cookie 571
[   16.509194] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 571 host->next_data.cookie 572
[   16.532421] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 572 host->next_data.cookie 573
[   16.544594] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 573 host->next_data.cookie 574
[   16.556621] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 574 host->next_data.cookie 575
[   16.568638] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 575 host->next_data.cookie 576
[   16.601092] jz4740-mmc 13450000.mmc: [jz4740_mmc_prepare_dma_data] invalid cookie: data->host_cookie 582 host->next_data.cookie 583

The problem is related to how pre_req/post_req is implemented:
currently, the driver expects jz4740_mmc_prepare_dma_data() to be
called with monotonically increasing host_cookie values, which is
wrong.

Moreover, the implementation is overly complicated, keeping track of
unneeded "next cookie" state.

So, instead of attempting to fix the current pre_req/post_req
implementation, this commit refactors the driver, dropping the state,
following other drivers such as dw_mmc and sdhci.

Cc: Paul Cercueil <paul@xxxxxxxxxxxxxxx>
Cc: Mathieu Malaterre <malat@xxxxxxxxxx>
Reported-by: Aaro Koskinen <aaro.koskinen@xxxxxx>
Signed-off-by: Ezequiel Garcia <ezequiel@xxxxxxxxxxxxx>
---
 drivers/mmc/host/jz4740_mmc.c | 118 ++++++++++++++++------------------
 1 file changed, 54 insertions(+), 64 deletions(-)
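Note for reviewers (not part of the patch, git am ignores this
section): below is a small, compilable userspace sketch of the cookie
lifecycle this patch adopts, the same scheme used by dw_mmc and sdhci.
It is only an illustration; fake_request, prepare_dma, pre_req,
post_req, start_transfer and request_done are hypothetical names and
are not part of the driver or of the MMC core API.

/*
 * Standalone model of the cookie state machine: pre_req maps the
 * request and marks it PREMAPPED so that post_req unmaps it; if
 * pre_req was skipped, the transfer path maps it as MAPPED and the
 * driver unmaps it itself before completing the request.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum cookie {
	COOKIE_UNMAPPED = 0,	/* request is not DMA-mapped */
	COOKIE_PREMAPPED,	/* mapped in pre_req, unmapped in post_req */
	COOKIE_MAPPED,		/* mapped by the driver, unmapped before
				 * the request is completed */
};

struct fake_request {
	int cookie;		/* models mmc_data->host_cookie */
	bool mapped;		/* models the scatterlist being DMA-mapped */
};

/* Map the request unless pre_req already did it. */
static void prepare_dma(struct fake_request *req, int cookie)
{
	if (req->cookie == COOKIE_PREMAPPED)
		return;
	req->mapped = true;
	req->cookie = cookie;
}

/* pre_req hook: may run while another request is still in flight. */
static void pre_req(struct fake_request *req)
{
	req->cookie = COOKIE_UNMAPPED;
	prepare_dma(req, COOKIE_PREMAPPED);
}

/* Transfer start: map now if pre_req was skipped. */
static void start_transfer(struct fake_request *req)
{
	prepare_dma(req, COOKIE_MAPPED);
}

/* Completion path: only unmap what was mapped outside of pre_req. */
static void request_done(struct fake_request *req)
{
	if (req->cookie == COOKIE_MAPPED) {
		req->mapped = false;
		req->cookie = COOKIE_UNMAPPED;
	}
}

/* post_req hook: unmap whatever is still mapped. */
static void post_req(struct fake_request *req)
{
	if (req->cookie != COOKIE_UNMAPPED) {
		req->mapped = false;
		req->cookie = COOKIE_UNMAPPED;
	}
}

int main(void)
{
	struct fake_request a = { 0 }, b = { 0 };

	/* Asynchronous path: pre_req maps, post_req unmaps. */
	pre_req(&a);
	start_transfer(&a);	/* no double mapping: cookie is PREMAPPED */
	request_done(&a);	/* leaves the PREMAPPED mapping alone */
	post_req(&a);		/* unmapped here */
	assert(!a.mapped && a.cookie == COOKIE_UNMAPPED);

	/* No pre_req: the driver maps and unmaps by itself. */
	start_transfer(&b);
	request_done(&b);	/* unmapped before completion */
	assert(!b.mapped && b.cookie == COOKIE_UNMAPPED);

	printf("cookie lifecycle OK\n");
	return 0;
}

The point of the scheme is that each mapping site records who is
responsible for the unmap, so pre_req/post_req no longer have to agree
on a monotonically increasing cookie value.
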
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0c1efd5100b7..ca20e2f81be8 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -126,9 +126,23 @@ enum jz4740_mmc_state {
 	JZ4740_MMC_STATE_DONE,
 };
 
-struct jz4740_mmc_host_next {
-	int sg_len;
-	s32 cookie;
+/*
+ * The MMC core allows to prepare a mmc_request while another mmc_request
+ * is in-flight. This is used via the pre_req/post_req hooks.
+ * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
+ * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
+ * flags to keep track of the mmc_request mapping state.
+ *
+ * COOKIE_UNMAPPED: the request is not mapped.
+ * COOKIE_PREMAPPED: the request was mapped in pre_req,
+ * and should be unmapped in post_req.
+ * COOKIE_MAPPED: the request was mapped in the irq handler,
+ * and should be unmapped before mmc_request_done is called.
+ */
+enum jz4780_cookie {
+	COOKIE_UNMAPPED = 0,
+	COOKIE_PREMAPPED,
+	COOKIE_MAPPED,
 };
 
 struct jz4740_mmc_host {
@@ -162,9 +176,7 @@ struct jz4740_mmc_host {
 	/* DMA support */
 	struct dma_chan *dma_rx;
 	struct dma_chan *dma_tx;
-	struct jz4740_mmc_host_next next_data;
 	bool use_dma;
-	int sg_len;
 
 	/* The DMA trigger level is 8 words, that is to say, the DMA read
 	 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
@@ -226,9 +238,6 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 		return PTR_ERR(host->dma_rx);
 	}
 
-	/* Initialize DMA pre request cookie */
-	host->next_data.cookie = 1;
-
 	return 0;
 }
 
@@ -245,60 +254,44 @@ static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 	enum dma_data_direction dir = mmc_get_dma_dir(data);
 
 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	data->host_cookie = COOKIE_UNMAPPED;
 }
 
-/* Prepares DMA data for current/next transfer, returns non-zero on failure */
+/* Prepares DMA data for current or next transfer.
+ * A request can be in-flight when this is called.
+ */
 static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 				       struct mmc_data *data,
-				       struct jz4740_mmc_host_next *next,
-				       struct dma_chan *chan)
+				       int cookie)
 {
-	struct jz4740_mmc_host_next *next_data = &host->next_data;
+	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 	enum dma_data_direction dir = mmc_get_dma_dir(data);
-	int sg_len;
-
-	if (!next && data->host_cookie &&
-	    data->host_cookie != host->next_data.cookie) {
-		dev_warn(mmc_dev(host->mmc),
-			 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
-			 __func__,
-			 data->host_cookie,
-			 host->next_data.cookie);
-		data->host_cookie = 0;
-	}
+	int sg_count;
 
-	/* Check if next job is already prepared */
-	if (next || data->host_cookie != host->next_data.cookie) {
-		sg_len = dma_map_sg(chan->device->dev,
-				data->sg,
-				data->sg_len,
-				dir);
+	if (data->host_cookie == COOKIE_PREMAPPED)
+		return data->sg_count;
 
-	} else {
-		sg_len = next_data->sg_len;
-		next_data->sg_len = 0;
-	}
+	sg_count = dma_map_sg(chan->device->dev,
+			data->sg,
+			data->sg_len,
+			dir);
 
-	if (sg_len <= 0) {
+	if (sg_count <= 0) {
 		dev_err(mmc_dev(host->mmc),
 			"Failed to map scatterlist for DMA operation\n");
 		return -EINVAL;
 	}
 
-	if (next) {
-		next->sg_len = sg_len;
-		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
-	} else
-		host->sg_len = sg_len;
+	data->sg_count = sg_count;
+	data->host_cookie = cookie;
 
-	return 0;
+	return data->sg_count;
 }
 
 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 					 struct mmc_data *data)
 {
-	int ret;
-	struct dma_chan *chan;
+	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 	struct dma_async_tx_descriptor *desc;
 	struct dma_slave_config conf = {
 		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -306,29 +299,26 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 	};
+	int sg_count;
 
 	if (data->flags & MMC_DATA_WRITE) {
 		conf.direction = DMA_MEM_TO_DEV;
 		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
-		chan = host->dma_tx;
 	} else {
 		conf.direction = DMA_DEV_TO_MEM;
 		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
 		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
-		chan = host->dma_rx;
 	}
 
-	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
-	if (ret)
-		return ret;
+	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
+	if (sg_count < 0)
+		return sg_count;
 
 	dmaengine_slave_config(chan, &conf);
-	desc = dmaengine_prep_slave_sg(chan,
-				       data->sg,
-				       host->sg_len,
-				       conf.direction,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
+				       conf.direction,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(mmc_dev(host->mmc),
 			"Failed to allocate DMA %s descriptor",
@@ -342,7 +332,8 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 	return 0;
 
 dma_unmap:
-	jz4740_mmc_dma_unmap(host, data);
+	if (data->host_cookie == COOKIE_MAPPED)
+		jz4740_mmc_dma_unmap(host, data);
 	return -ENOMEM;
 }
 
@@ -351,16 +342,13 @@ static void jz4740_mmc_pre_request(struct mmc_host *mmc,
 {
 	struct jz4740_mmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
-	struct jz4740_mmc_host_next *next_data = &host->next_data;
 
-	BUG_ON(data->host_cookie);
-
-	if (host->use_dma) {
-		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+	if (!host->use_dma)
+		return;
 
-		if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
-			data->host_cookie = 0;
-	}
+	data->host_cookie = COOKIE_UNMAPPED;
+	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
+		data->host_cookie = COOKIE_UNMAPPED;
 }
 
 static void jz4740_mmc_post_request(struct mmc_host *mmc,
@@ -370,10 +358,8 @@ static void jz4740_mmc_post_request(struct mmc_host *mmc,
 	struct jz4740_mmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->use_dma && data->host_cookie) {
+	if (data && data->host_cookie != COOKIE_UNMAPPED)
 		jz4740_mmc_dma_unmap(host, data);
-		data->host_cookie = 0;
-	}
 
 	if (err) {
 		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
@@ -436,10 +422,14 @@ static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
 static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
 {
 	struct mmc_request *req;
+	struct mmc_data *data;
 
 	req = host->req;
+	data = req->data;
 	host->req = NULL;
 
+	if (data && data->host_cookie == COOKIE_MAPPED)
+		jz4740_mmc_dma_unmap(host, data);
 	mmc_request_done(host->mmc, req);
 }
 
-- 
2.19.1