> On 16 Feb 2016, at 12:28, Wenwei Tao <ww.tao0320@xxxxxxxxx> wrote:
>
> When creating a target, we specify the begin lunid and
> the end lunid, and get the corresponding continuous
> luns from the media manager. If one of those luns is not
> free, we fail to create the target, even if the device
> has enough free luns in total.
>
> So add non-continuous lun target creation support,
> thus we can improve the backend device's space utilization.

In general, the idea is good. I tested the patch on top of for-next, and
simple LUN creation fails in rrpc_map_init. This is probably due to the
changes you made to the reverse translation map. See some comments below.

Also, I assume that you expect the default path to fall back to
NVM_CONFIG_TYPE_SIMPLE, right? I tested NVM_CONFIG_TYPE_LIST by modifying
lnvm [1], but maybe you want to send a pull request and add the
functionality yourself. Besides this, the ioctl interface seems clean;
maybe Matias has more comments on it.

[1] https://github.com/OpenChannelSSD/lnvm
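For reference, creating a target on a hand-picked, non-contiguous set of
LUNs from userspace would look roughly like this. This is a sketch only:
it assumes the NVM_DEV_CREATE ioctl and the struct layout from this
patch's include/uapi/linux/lightnvm.h, and the device name, target name
and LUN ids are made up; error handling is minimal.

/*
 * Sketch: create an rrpc target on a non-contiguous LUN set via the
 * new NVM_CONFIG_TYPE_LIST interface.  Device/target names and LUN
 * ids below are made-up examples.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lightnvm.h>

int main(void)
{
	struct nvm_ioctl_create c;
	__u32 luns[] = { 0, 2, 5, 7 };	/* the free LUNs to claim */
	unsigned int i;
	int fd, ret;

	memset(&c, 0, sizeof(c));
	strncpy(c.dev, "nvme0n1", DISK_NAME_LEN - 1);
	strncpy(c.tgtname, "tgt0", DISK_NAME_LEN - 1);
	strncpy(c.tgttype, "rrpc", NVM_TTYPE_NAME_MAX - 1);
	c.flags = 0;
	c.conf.type = NVM_CONFIG_TYPE_LIST;
	c.conf.l.nr_luns = sizeof(luns) / sizeof(luns[0]);
	for (i = 0; i < c.conf.l.nr_luns; i++)
		c.conf.l.lunid[i] = luns[i];

	fd = open(NVM_CTRL_FILE, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = ioctl(fd, NVM_DEV_CREATE, &c);
	if (ret)
		perror("NVM_DEV_CREATE");
	close(fd);
	return ret ? 1 : 0;
}

Since lunid[] is capped at NVM_LUNS_MAX (768), the list adds 768 * 4 =
3072 bytes to the argument, which keeps the struct within a single page,
as the v3 changelog notes.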
Other comments inline.

> Signed-off-by: Wenwei Tao <ww.tao0320@xxxxxxxxx>
> ---
> Changes since v3
> -limit list luns to 768, thus we don't go
> above a single memory page.
> -move NVM_DEV_FREE_LUNS to its own patch and
> rename it to NVM_DEV_LUNS_STATUS.
> -insert and delete some lines to increase
> readability.
> -rebase to for-4.6
>
> Changes since v2
> -rebase on for-next branch
> -move luns bitmap to PATCH 2
> -remove the logic to dynamically select another lun than
> the one requested
> -implement lunid list in the lnvm ioctl interface
>
> Changes since v1
> -use NVM_FIXED instead of NVM_C_FIXED in gennvm_get_lun
> -add target creation flags check
> -rebase to v4.5-rc1
>
> drivers/lightnvm/core.c       |  64 +++++++------
> drivers/lightnvm/rrpc.c       | 202 ++++++++++++++++++++++++++++--------------
> drivers/lightnvm/rrpc.h       |   7 +-
> include/linux/lightnvm.h      |   4 +-
> include/uapi/linux/lightnvm.h |   9 ++
> 5 files changed, 186 insertions(+), 100 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 7a8dd1a..95c5445 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -625,7 +625,7 @@ static const struct block_device_operations nvm_fops = {
>  static int nvm_create_target(struct nvm_dev *dev,
>  						struct nvm_ioctl_create *create)
>  {
> -	struct nvm_ioctl_create_simple *s = &create->conf.s;
> +	struct nvm_ioctl_create_conf *conf = &create->conf;
>  	struct request_queue *tqueue;
>  	struct gendisk *tdisk;
>  	struct nvm_tgt_type *tt;
>
> @@ -674,7 +674,7 @@ static int nvm_create_target(struct nvm_dev *dev,
>  	tdisk->fops = &nvm_fops;
>  	tdisk->queue = tqueue;
>
> -	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
> +	targetdata = tt->init(dev, tdisk, conf);
>  	if (IS_ERR(targetdata))
>  		goto err_init;
>
> @@ -726,7 +726,6 @@ static void nvm_remove_target(struct nvm_target *t)
>  static int __nvm_configure_create(struct nvm_ioctl_create *create)
>  {
>  	struct nvm_dev *dev;
> -	struct nvm_ioctl_create_simple *s;
>
>  	down_write(&nvm_lock);
>  	dev = nvm_find_nvm_dev(create->dev);
> @@ -736,17 +735,11 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
>  		return -EINVAL;
>  	}
>
> -	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
> +	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE &&
> +			create->conf.type != NVM_CONFIG_TYPE_LIST) {
>  		pr_err("nvm: config type not valid\n");
>  		return -EINVAL;
>  	}
> -	s = &create->conf.s;
> -
> -	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
> -		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
> -			s->lun_begin, s->lun_end, dev->nr_luns);
> -		return -EINVAL;
> -	}
>
>  	return nvm_create_target(dev, create);
>  }
> @@ -824,24 +817,29 @@ static int nvm_configure_remove(const char *val)
>
>  static int nvm_configure_create(const char *val)
>  {
> -	struct nvm_ioctl_create create;
> +	struct nvm_ioctl_create *create;
>  	char opcode;
>  	int lun_begin, lun_end, ret;
>
> -	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
> -						create.tgtname, create.tgttype,
> +	create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
> +	if (!create)
> +		return -ENOMEM;
> +
> +	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create->dev,
> +						create->tgtname, create->tgttype,
>  						&lun_begin, &lun_end);
>  	if (ret != 6) {
>  		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
> +		kfree(create);
>  		return -EINVAL;
>  	}
>
> -	create.flags = 0;
> -	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
> -	create.conf.s.lun_begin = lun_begin;
> -	create.conf.s.lun_end = lun_end;
> +	create->flags = 0;
> +	create->conf.type = NVM_CONFIG_TYPE_SIMPLE;
> +	create->conf.s.lun_begin = lun_begin;
> +	create->conf.s.lun_end = lun_end;
>
> -	return __nvm_configure_create(&create);
> +	return __nvm_configure_create(create);
>  }
>
>
> @@ -994,24 +992,34 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
>
>  static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
>  {
> -	struct nvm_ioctl_create create;
> +	struct nvm_ioctl_create *create;
> +	long ret = -EINVAL;
>
>  	if (!capable(CAP_SYS_ADMIN))
>  		return -EPERM;
> +	create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
> +	if (!create)
> +		return -ENOMEM;
>
> -	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
> -		return -EFAULT;
> +	if (copy_from_user(create, arg, sizeof(struct nvm_ioctl_create))) {
> +		ret = -EFAULT;
> +		goto out;
> +	}
>
> -	create.dev[DISK_NAME_LEN - 1] = '\0';
> -	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
> -	create.tgtname[DISK_NAME_LEN - 1] = '\0';
> +	create->dev[DISK_NAME_LEN - 1] = '\0';
> +	create->tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
> +	create->tgtname[DISK_NAME_LEN - 1] = '\0';
>
> -	if (create.flags != 0) {
> +	if (create->flags != 0) {
>  		pr_err("nvm: no flags supported\n");
> -		return -EINVAL;
> +		goto out;
>  	}
>
> -	return __nvm_configure_create(&create);
> +	ret = __nvm_configure_create(create);
> +
> +out:
> +	kfree(create);
> +	return ret;
>  }
>
>  static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index c2f9a64..c35cf0a 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -23,28 +23,35 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
>  				struct nvm_rq *rqd, unsigned long flags);
>
>  #define rrpc_for_each_lun(rrpc, rlun, i) \
> -		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
> -			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
> +	for ((i) = 0, rlun = &(rrpc)->luns[0]; \
> +		(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
> +
> +static inline u64 lun_poffset(struct nvm_lun *lun, struct nvm_dev *dev)
> +{
> +	return lun->id * dev->sec_per_lun;
> +}
>
>  static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
>  {
>  	struct rrpc_block *rblk = a->rblk;
> -	unsigned int pg_offset;
> +	struct rrpc_lun *rlun = rblk->rlun;
> +	u64 pg_offset;
>
> -	lockdep_assert_held(&rrpc->rev_lock);
> +	lockdep_assert_held(&rlun->rev_lock);
>
>  	if (a->addr == ADDR_EMPTY || !rblk)
>  		return;
>
>  	spin_lock(&rblk->lock);
>
> -	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
> +	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, (u32 *)&pg_offset);
>  	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
>  	rblk->nr_invalid_pages++;
>
>  	spin_unlock(&rblk->lock);
>
> -	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
> +	pg_offset = lun_poffset(rlun->parent, rrpc->dev);
> +	rlun->rev_trans_map[a->addr - pg_offset].addr = ADDR_EMPTY;
>  }
>
>  static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
> @@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
>  {
>  	sector_t i;
>
> -	spin_lock(&rrpc->rev_lock);
>  	for (i = slba; i < slba + len; i++) {
>  		struct rrpc_addr *gp = &rrpc->trans_map[i];
> +		struct rrpc_lun *rlun = gp->rblk->rlun;
>
> +		spin_lock(&rlun->rev_lock);
>  		rrpc_page_invalidate(rrpc, gp);
> +		spin_unlock(&rlun->rev_lock);
>  		gp->rblk = NULL;
>  	}
> -	spin_unlock(&rrpc->rev_lock);
>  }
>
>  static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
> @@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
>  static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>  {
>  	struct request_queue *q = rrpc->dev->q;
> +	struct rrpc_lun *rlun = rblk->rlun;
>  	struct rrpc_rev_addr *rev;
>  	struct nvm_rq *rqd;
>  	struct bio *bio;
>  	struct page *page;
>  	int slot;
>  	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
> -	u64 phys_addr;
> +	u64 phys_addr, poffset;
>  	DECLARE_COMPLETION_ONSTACK(wait);
>
>  	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
> @@ -305,6 +314,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>  		return -ENOMEM;
>  	}
>
> +	poffset = lun_poffset(rlun->parent, rrpc->dev);
>  	while ((slot = find_first_zero_bit(rblk->invalid_pages,
>  					nr_pgs_per_blk)) < nr_pgs_per_blk) {
>
> @@ -312,23 +322,23 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>  		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
>
>  try:
> -		spin_lock(&rrpc->rev_lock);
> +		spin_lock(&rlun->rev_lock);
>  		/* Get logical address from physical to logical table */
> -		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
> +		rev = &rlun->rev_trans_map[phys_addr - poffset];
>  		/* already updated by previous regular write */
>  		if (rev->addr == ADDR_EMPTY) {
> -			spin_unlock(&rrpc->rev_lock);
> +			spin_unlock(&rlun->rev_lock);
>  			continue;
>  		}
>
>  		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
>  		if (IS_ERR_OR_NULL(rqd)) {
> -			spin_unlock(&rrpc->rev_lock);
> +			spin_unlock(&rlun->rev_lock);
>  			schedule();
>  			goto try;
>  		}
>
> -		spin_unlock(&rrpc->rev_lock);
> +		spin_unlock(&rlun->rev_lock);
>
>  		/* Perform read to do GC */
>  		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
> @@ -397,7 +407,7 @@ static void rrpc_block_gc(struct work_struct *work)
>  	struct rrpc_block *rblk = gcb->rblk;
>  	struct nvm_dev *dev = rrpc->dev;
>  	struct nvm_lun *lun = rblk->parent->lun;
> -	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
> +	struct rrpc_lun *rlun = lun->private;
>
>  	mempool_free(gcb, rrpc->gcb_pool);
>  	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
> @@ -500,7 +510,7 @@ static void rrpc_gc_queue(struct work_struct *work)
>  	struct rrpc_block *rblk = gcb->rblk;
>  	struct nvm_lun *lun = rblk->parent->lun;
>  	struct nvm_block *blk = rblk->parent;
> -	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
> +	struct rrpc_lun *rlun = lun->private;
>
>  	spin_lock(&rlun->lock);
>  	list_add_tail(&rblk->prio, &rlun->prio_list);
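This pair of hunks is the crux of supporting a non-contiguous set: the old
lookup

	rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

only works when the target owns a contiguous [lun_begin, lun_end] range,
while the new

	rlun = lun->private;

backpointer remains valid for an arbitrary set of reserved luns.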
> @@ -551,22 +561,25 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
>  static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
>  					struct rrpc_block *rblk, u64 paddr)
>  {
> +	struct rrpc_lun *rlun = rblk->rlun;
>  	struct rrpc_addr *gp;
>  	struct rrpc_rev_addr *rev;
> +	u64 poffset = lun_poffset(rlun->parent, rrpc->dev);
>
>  	BUG_ON(laddr >= rrpc->nr_sects);
>
>  	gp = &rrpc->trans_map[laddr];
> -	spin_lock(&rrpc->rev_lock);
> +	spin_lock(&rlun->rev_lock);
> +
>  	if (gp->rblk)
>  		rrpc_page_invalidate(rrpc, gp);
>
>  	gp->addr = paddr;
>  	gp->rblk = rblk;
>
> -	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
> +	rev = &rlun->rev_trans_map[gp->addr - poffset];
>  	rev->addr = laddr;
> -	spin_unlock(&rrpc->rev_lock);
> +	spin_unlock(&rlun->rev_lock);
>
>  	return gp;
>  }
> @@ -980,7 +993,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
>
>  static void rrpc_map_free(struct rrpc *rrpc)
>  {
> -	vfree(rrpc->rev_trans_map);
>  	vfree(rrpc->trans_map);
>  }
>
> @@ -988,8 +1000,8 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>  {
>  	struct rrpc *rrpc = (struct rrpc *)private;
>  	struct nvm_dev *dev = rrpc->dev;
> -	struct rrpc_addr *addr = rrpc->trans_map + slba;
> -	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
> +	struct rrpc_addr *addr;
> +	struct rrpc_rev_addr *raddr;
>  	u64 elba = slba + nlb;
>  	u64 i;
>
> @@ -998,8 +1010,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>  		return -EINVAL;
>  	}
>
> +	slba -= rrpc->soffset >> (ilog2(dev->sec_size) - 9);
> +	addr = rrpc->trans_map + slba;
>  	for (i = 0; i < nlb; i++) {
> +		struct rrpc_lun *rlun;
> +		struct nvm_lun *lun;
>  		u64 pba = le64_to_cpu(entries[i]);
> +		u64 poffset;
> +		int lunid;
> +
>  		/* LNVM treats address-spaces as silos, LBA and PBA are
>  		 * equally large and zero-indexed.
>  		 */
> @@ -1015,8 +1034,18 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>  		if (!pba)
>  			continue;
>
> +		lunid = div_u64(pba, dev->sec_per_lun);
> +		lun = dev->mt->get_lun(dev, lunid);
> +
> +		if (unlikely(!lun))
> +			return -EINVAL;
> +
> +		rlun = lun->private;
> +		raddr = rlun->rev_trans_map;
> +		poffset = lun_poffset(lun, dev);
> +
>  		addr[i].addr = pba;
> -		raddr[pba].addr = slba + i;
> +		raddr[pba - poffset].addr = slba + i;
>  	}
>
>  	return 0;
> @@ -1035,17 +1064,10 @@ static int rrpc_map_init(struct rrpc *rrpc)
>  	if (!rrpc->trans_map)
>  		return -ENOMEM;
>
> -	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
> -							* rrpc->nr_sects);
> -	if (!rrpc->rev_trans_map)
> -		return -ENOMEM;
> -
>  	for (i = 0; i < rrpc->nr_sects; i++) {
>  		struct rrpc_addr *p = &rrpc->trans_map[i];
> -		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
>
>  		p->addr = ADDR_EMPTY;
> -		r->addr = ADDR_EMPTY;
>  	}
>
>  	if (!dev->ops->get_l2p_tbl)
> @@ -1117,8 +1139,8 @@ static void rrpc_core_free(struct rrpc *rrpc)
>  static void rrpc_luns_free(struct rrpc *rrpc)
>  {
>  	struct nvm_dev *dev = rrpc->dev;
> -	struct nvm_lun *lun;
>  	struct rrpc_lun *rlun;
> +	struct nvm_lun *lun;
>  	int i;
>
>  	if (!rrpc->luns)
> @@ -1130,25 +1152,69 @@ static void rrpc_luns_free(struct rrpc *rrpc)
>  		if (!lun)
>  			break;
>  		dev->mt->release_lun(dev, lun->id);
> +		vfree(rlun->rev_trans_map);
>  		vfree(rlun->blocks);
>  	}
>
>  	kfree(rrpc->luns);
> +	rrpc->luns = NULL;
>  }
>
> -static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
> +static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
> +			struct nvm_lun *lun)
> +{
> +	struct nvm_dev *dev = rrpc->dev;
> +	int i;
> +
> +	rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
> +							dev->sec_per_lun);
> +	if (!rlun->rev_trans_map)
> +		return -ENOMEM;
> +
> +	for (i = 0; i < dev->sec_per_lun; i++) {
> +		struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
> +
> +		r->addr = ADDR_EMPTY;
> +	}
> +	rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
> +	if (!rlun->blocks) {
> +		vfree(rlun->rev_trans_map);
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < dev->blks_per_lun; i++) {
> +		struct rrpc_block *rblk = &rlun->blocks[i];
> +		struct nvm_block *blk = &lun->blocks[i];
> +
> +		rblk->parent = blk;
> +		rblk->rlun = rlun;
> +		INIT_LIST_HEAD(&rblk->prio);
> +		spin_lock_init(&rblk->lock);
> +	}
> +
> +	rlun->rrpc = rrpc;
> +	lun->private = rlun;
> +	INIT_LIST_HEAD(&rlun->prio_list);
> +	INIT_LIST_HEAD(&rlun->open_list);
> +	INIT_LIST_HEAD(&rlun->closed_list);
> +	INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
> +	spin_lock_init(&rlun->lock);
> +	spin_lock_init(&rlun->rev_lock);
> +
> +	return 0;
> +}

Move all the code concerning the per-lun reverse translation map to a new
patch. This is orthogonal to the lun creation.
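To make the new indexing concrete: each lun now carries its own
rev_trans_map of dev->sec_per_lun entries, and a global physical sector is
located by subtracting the lun's base offset. A standalone sketch (the
sec_per_lun value below is made up for illustration):

/* per-LUN reverse-map slot for a global physical sector address */
static unsigned long long rev_slot(unsigned long long pba,
				   unsigned int sec_per_lun)
{
	unsigned long long lunid = pba / sec_per_lun;	  /* owning lun  */
	unsigned long long poffset = lunid * sec_per_lun; /* lun_poffset */

	return pba - poffset;	/* index into that lun's rev_trans_map[] */
}

/* e.g. with sec_per_lun = 4096: pba 8200 -> lun 2, slot 8 */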
> +
> +static int rrpc_luns_init(struct rrpc *rrpc, struct nvm_ioctl_create_conf *conf)
>  {
>  	struct nvm_dev *dev = rrpc->dev;
>  	struct rrpc_lun *rlun;
> -	int i, j, ret = -EINVAL;
> +	int i, ret = -EINVAL;
>
>  	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
>  		pr_err("rrpc: number of pages per block too high.");
>  		return -EINVAL;
>  	}
>
> -	spin_lock_init(&rrpc->rev_lock);
> -
>  	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
>  								GFP_KERNEL);
>  	if (!rrpc->luns)
> @@ -1156,8 +1222,21 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>
>  	/* 1:1 mapping */
>  	for (i = 0; i < rrpc->nr_luns; i++) {
> -		int lunid = lun_begin + i;
>  		struct nvm_lun *lun;
> +		int lunid;
> +
> +		if (conf->type == NVM_CONFIG_TYPE_SIMPLE)
> +			lunid = conf->s.lun_begin + i;
> +		else if (conf->type == NVM_CONFIG_TYPE_LIST)
> +			lunid = conf->l.lunid[i];
> +		else
> +			goto err;
> +
> +		if (lunid >= dev->nr_luns) {
> +			pr_err("rrpc: lun out of bound (%u >= %u)\n",
> +						lunid, dev->nr_luns);
> +			goto err;
> +		}
>
>  		if (dev->mt->reserve_lun(dev, lunid)) {
>  			pr_err("rrpc: lun %u is already allocated\n", lunid);
> @@ -1170,31 +1249,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>
>  		rlun = &rrpc->luns[i];
>  		rlun->parent = lun;
> -		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
> -						rrpc->dev->blks_per_lun);
> -		if (!rlun->blocks) {
> -			ret = -ENOMEM;
> +		ret = rrpc_lun_init(rrpc, rlun, lun);
> +		if (!ret)
>  			goto err;
> -		}
> -
> -		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
> -			struct rrpc_block *rblk = &rlun->blocks[j];
> -			struct nvm_block *blk = &lun->blocks[j];
> -
> -			rblk->parent = blk;
> -			rblk->rlun = rlun;
> -			INIT_LIST_HEAD(&rblk->prio);
> -			spin_lock_init(&rblk->lock);
> -		}
> -
> -		rlun->rrpc = rrpc;
> -		INIT_LIST_HEAD(&rlun->prio_list);
> -		INIT_LIST_HEAD(&rlun->open_list);
> -		INIT_LIST_HEAD(&rlun->closed_list);
> -
> -		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
> -		spin_lock_init(&rlun->lock);
> -
>  		rrpc->total_blocks += dev->blks_per_lun;
>  		rrpc->nr_sects += dev->sec_per_lun;
>
> @@ -1202,6 +1259,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>
>  	return 0;
>  err:
> +	rrpc_luns_free(rrpc);
>  	return ret;
>  }
>
> @@ -1275,14 +1333,16 @@ static sector_t rrpc_capacity(void *private)
>  static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
>  {
>  	struct nvm_dev *dev = rrpc->dev;
> +	struct rrpc_lun *rlun = rblk->rlun;
>  	int offset;
>  	struct rrpc_addr *laddr;
> -	u64 paddr, pladdr;
> +	u64 paddr, pladdr, poffset;
>
> +	poffset = lun_poffset(rlun->parent, dev);
>  	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
>  		paddr = block_to_addr(rrpc, rblk) + offset;
>
> -		pladdr = rrpc->rev_trans_map[paddr].addr;
> +		pladdr = rlun->rev_trans_map[paddr - poffset].addr;
>  		if (pladdr == ADDR_EMPTY)
>  			continue;
>
> @@ -1347,7 +1407,7 @@ err:
>  static struct nvm_tgt_type tt_rrpc;
>
>  static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
> -		int lun_begin, int lun_end)
> +		struct nvm_ioctl_create_conf *conf)
>  {
>  	struct request_queue *bqueue = dev->q;
>  	struct request_queue *tqueue = tdisk->queue;
> @@ -1373,7 +1433,16 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>  	spin_lock_init(&rrpc->bio_lock);
>  	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
>
> -	rrpc->nr_luns = lun_end - lun_begin + 1;
> +	if (conf->type == NVM_CONFIG_TYPE_SIMPLE)
> +		rrpc->nr_luns =
> +			conf->s.lun_end - conf->s.lun_begin + 1;

No need to break the line.
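For example, the unwrapped assignment fits comfortably within 80 columns:

	if (conf->type == NVM_CONFIG_TYPE_SIMPLE)
		rrpc->nr_luns = conf->s.lun_end - conf->s.lun_begin + 1;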
> +
> +	else if (conf->type == NVM_CONFIG_TYPE_LIST)
> +		rrpc->nr_luns = conf->l.nr_luns;
> +	else {
> +		kfree(rrpc);
> +		return ERR_PTR(-EINVAL);
> +	}
>
>  	/* simple round-robin strategy */
>  	atomic_set(&rrpc->next_lun, -1);
>
> @@ -1385,15 +1454,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>  	}
>  	rrpc->soffset = soffset;
>
> -	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
> +	ret = rrpc_luns_init(rrpc, conf);
>  	if (ret) {
>  		pr_err("nvm: rrpc: could not initialize luns\n");
>  		goto err;
>  	}
>
> -	rrpc->poffset = dev->sec_per_lun * lun_begin;
> -	rrpc->lun_offset = lun_begin;
> -
>  	ret = rrpc_core_init(rrpc);
>  	if (ret) {
>  		pr_err("nvm: rrpc: could not initialize core\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index 1c4b1c9..75aa131 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -87,6 +87,10 @@ struct rrpc_lun {
>
>  	struct work_struct ws_gc;
>
> +	/* store a reverse map for garbage collection */
> +	struct rrpc_rev_addr *rev_trans_map;
> +	spinlock_t rev_lock;
> +
>  	spinlock_t lock;
>  };
>
> @@ -124,9 +128,6 @@ struct rrpc {
>  	 * addresses are used when writing to the disk block device.
>  	 */
>  	struct rrpc_addr *trans_map;
> -	/* also store a reverse map for garbage collection */
> -	struct rrpc_rev_addr *rev_trans_map;
> -	spinlock_t rev_lock;
>
>  	struct rrpc_inflight inflights;
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 201b2a3..e90a761 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -276,6 +276,7 @@ struct nvm_lun {
>  	spinlock_t lock;
>
>  	struct nvm_block *blocks;
> +	void *private;
>  };
>
>  enum {
> @@ -430,7 +431,8 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
>
>  typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
>  typedef sector_t (nvm_tgt_capacity_fn)(void *);
> -typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
> +typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *,
> +				struct nvm_ioctl_create_conf *);
> typedef void (nvm_tgt_exit_fn)(void *);
>
>  struct nvm_tgt_type {
> diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
> index 774a431..6dbf6a0 100644
> --- a/include/uapi/linux/lightnvm.h
> +++ b/include/uapi/linux/lightnvm.h
> @@ -35,6 +35,8 @@
>  #define NVM_TTYPE_MAX 63
>  #define NVM_MMTYPE_LEN 8
>
> +#define NVM_LUNS_MAX 768
> +
>  #define NVM_CTRL_FILE "/dev/lightnvm/control"
>
>  struct nvm_ioctl_info_tgt {
> @@ -74,14 +76,21 @@ struct nvm_ioctl_create_simple {
>  	__u32 lun_end;
>  };
>
> +struct nvm_ioctl_create_list {
> +	__u32 nr_luns;
> +	__u32 lunid[NVM_LUNS_MAX];
> +};
> +
>  enum {
>  	NVM_CONFIG_TYPE_SIMPLE = 0,
> +	NVM_CONFIG_TYPE_LIST,
>  };
>
>  struct nvm_ioctl_create_conf {
>  	__u32 type;
>  	union {
>  		struct nvm_ioctl_create_simple s;
> +		struct nvm_ioctl_create_list l;
>  	};
>  };
>
> --
> 1.8.3.1

I sent a new patch for 4.5 to generalize the rrpc calculations; you might
want to pick that up and rebase. Also, you can use the new features in
qemu-nvme [2] to test the multiple-LUN configuration. This might help you
debug this patch :)

[2] https://github.com/OpenChannelSSD/qemu-nvme/tree/wl_sim

Javier