A virtual block (vblk) enables a block to identify multiple physical blocks. This is useful for metadata management when the device media supports multiple planes: a block spanning multiple planes can then be managed as a single vblk, reducing the required metadata by a factor of the plane count (one fourth on quad-plane media).

nvm_set_rqd_ppalist() takes care of expanding a ppa_list with vblks automatically. However, for some use-cases, where only a single physical block is required, the ppa_list should not be expanded.

Therefore, add a vblk parameter to nvm_set_rqd_ppalist(), and only expand the ppa_list if vblk is set.

Signed-off-by: Matias Bjørling <m@xxxxxxxxxxx>
---
(A usage sketch for the new vblk argument follows the patch.)

 drivers/lightnvm/core.c   | 31 +++++++++++++++++--------------
 drivers/lightnvm/sysblk.c |  2 +-
 include/linux/lightnvm.h  |  2 +-
 3 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index e6d7a98..de5db7b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -251,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 EXPORT_SYMBOL(nvm_generic_to_addr_mode);
 
 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
-			struct ppa_addr *ppas, int nr_ppas)
+			struct ppa_addr *ppas, int nr_ppas, int vblk)
 {
 	int i, plane_cnt, pl_idx;
 
-	if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
-		rqd->nr_pages = 1;
+	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+		rqd->nr_pages = nr_ppas;
 		rqd->ppa_addr = ppas[0];
 
 		return 0;
 	}
 
-	plane_cnt = dev->plane_mode;
-	rqd->nr_pages = plane_cnt * nr_ppas;
-
-	if (dev->ops->max_phys_sect < rqd->nr_pages)
-		return -EINVAL;
-
+	rqd->nr_pages = nr_ppas;
 	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 	if (!rqd->ppa_list) {
 		pr_err("nvm: failed to allocate dma memory\n");
 		return -ENOMEM;
 	}
 
-	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+	if (!vblk) {
+		for (i = 0; i < nr_ppas; i++)
+			rqd->ppa_list[i] = ppas[i];
+	} else {
+		plane_cnt = dev->plane_mode;
+		rqd->nr_pages *= plane_cnt;
+
 		for (i = 0; i < nr_ppas; i++) {
-			ppas[i].g.pl = pl_idx;
-			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+				ppas[i].g.pl = pl_idx;
+				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+			}
 		}
 	}
 
@@ -304,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
 	if (ret)
 		return ret;
 
@@ -420,7 +423,7 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
 	int ret;
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
 	if (ret)
 		return ret;
 
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index bca6902..737fbc3 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -277,7 +277,7 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
 	nvm_generic_to_addr_mode(dev, &rqd);
 
 	ret = dev->ops->set_bb_tbl(dev, &rqd, type);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 16d4f2e..9ae0b7c 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -526,7 +526,7 @@ extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
 extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
 extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
-					struct ppa_addr *, int);
+					struct ppa_addr *, int, int);
 extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
 extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
-- 
2.1.4
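
For reference, a minimal usage sketch of the new vblk argument (not part of the patch; the helper example_erase_one_plane and the erase submission mentioned in the comment are illustrative assumptions). With vblk = 0 the request addresses exactly the physical block passed in, while vblk = 1 keeps the existing behaviour of expanding the ppa_list across planes:

#include <linux/lightnvm.h>
#include <linux/string.h>

static int example_erase_one_plane(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	/* vblk = 0: address only this physical block, no plane expansion */
	ret = nvm_set_rqd_ppalist(dev, &rqd, &ppa, 1, 0);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	/* submit the erase here, e.g. via dev->ops->erase_block(dev, &rqd) */

	nvm_free_rqd_ppalist(dev, &rqd);

	return 0;
}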