Currently whole lightnvm and pblk uses single DMA pool,
for which entry size is always equal to PAGE_SIZE.
PPA list always needs 8b*64, so there is only 56b*64
space for OOB meta. Since NVMe OOB meta can be bigger,
such as 128b, this solution is not robust.
This patch adds the possibility to support OOB meta above
56b by creating separate DMA pool for PBLK with entry
size which is big enough to store both PPA list and such
OOB metadata.
Signed-off-by: Igor Konopko <igor.j.konopko@xxxxxxxxx>
---
drivers/lightnvm/core.c | 33 +++++++++++++++++++++++---------
drivers/lightnvm/pblk-core.c | 19 +++++++++---------
drivers/lightnvm/pblk-init.c | 11 +++++++++++
drivers/lightnvm/pblk-read.c | 3 ++-
drivers/lightnvm/pblk-recovery.c | 9 +++++----
drivers/lightnvm/pblk.h | 11 ++++++++++-
drivers/nvme/host/lightnvm.c | 6 ++++--
include/linux/lightnvm.h | 8 +++++---
8 files changed, 71 insertions(+), 29 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index efb976a863d2..48db7a096257 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -641,20 +641,33 @@ void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
-void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
- dma_addr_t *dma_handler)
+void *nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
+ gfp_t mem_flags, dma_addr_t *dma_handler)
{
- return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
- dma_handler);
+ return dev->ops->dev_dma_alloc(dev, pool ?: dev->dma_pool,
+ mem_flags, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
+void nvm_dev_dma_free(struct nvm_dev *dev, void *pool,
+ void *addr, dma_addr_t dma_handler)
{
- dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
+ dev->ops->dev_dma_free(pool ?: dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
+void *nvm_dev_dma_create(struct nvm_dev *dev, int size, char *name)
+{
+ return dev->ops->create_dma_pool(dev, name, size);
+}
+EXPORT_SYMBOL(nvm_dev_dma_create);
+
+void nvm_dev_dma_destroy(struct nvm_dev *dev, void *pool)
+{
+ dev->ops->destroy_dma_pool(pool);
+}
+EXPORT_SYMBOL(nvm_dev_dma_destroy);
+
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
struct nvm_dev *dev;
@@ -682,7 +695,8 @@ static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
}
rqd->nr_ppas = nr_ppas;
- rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
+ rqd->ppa_list = nvm_dev_dma_alloc(dev, NULL, GFP_KERNEL,
+ &rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("nvm: failed to allocate dma memory\n");
return -ENOMEM;
@@ -708,7 +722,8 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
if (!rqd->ppa_list)
return;
- nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(tgt_dev->parent, NULL, rqd->ppa_list,
+ rqd->dma_ppa_list);
}
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
@@ -1145,7 +1160,7 @@ int nvm_register(struct nvm_dev *dev)
if (!dev->q || !dev->ops)
return -EINVAL;
- dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+ dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist", PAGE_SIZE);
if (!dev->dma_pool) {
pr_err("nvm: could not create dma pool\n");
return -ENOMEM;