Currently pmem_do_write() is written with the assumption that all I/O is
sector aligned. Soon I want to use this function in zero_page_range(),
where the range passed in does not have to be sector aligned. Modify this
function so it can deal with an arbitrary range, specified by pmem_off
and len.

Signed-off-by: Vivek Goyal <vgoyal@xxxxxxxxxx>
---
 drivers/nvdimm/pmem.c | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 075b11682192..fae8f67da9de 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -154,15 +154,23 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem,
 
 static blk_status_t pmem_do_write(struct pmem_device *pmem,
 			struct page *page, unsigned int page_off,
-			sector_t sector, unsigned int len)
+			u64 pmem_off, unsigned int len)
 {
 	blk_status_t rc = BLK_STS_OK;
 	bool bad_pmem = false;
-	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
-	void *pmem_addr = pmem->virt_addr + pmem_off;
-
-	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
-		bad_pmem = true;
+	phys_addr_t pmem_real_off = pmem_off + pmem->data_offset;
+	void *pmem_addr = pmem->virt_addr + pmem_real_off;
+	sector_t sector_start, sector_end;
+	unsigned nr_sectors;
+
+	sector_start = DIV_ROUND_UP(pmem_off, SECTOR_SIZE);
+	sector_end = (pmem_off + len) >> SECTOR_SHIFT;
+	if (sector_end > sector_start) {
+		nr_sectors = sector_end - sector_start;
+		if (is_bad_pmem(&pmem->bb, sector_start,
+				nr_sectors << SECTOR_SHIFT))
+			bad_pmem = true;
+	}
 
 	/*
 	 * Note that we write the data both before and after
@@ -181,7 +189,13 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
 	flush_dcache_page(page);
 	write_pmem(pmem_addr, page, page_off, len);
 	if (unlikely(bad_pmem)) {
-		rc = pmem_clear_poison(pmem, pmem_off, len);
+		/*
+		 * Pass sector aligned offset and length. That seems
+		 * to work as of now. Other finer grained alignment
+		 * cases can be addressed later if need be.
+		 */
+		rc = pmem_clear_poison(pmem, ALIGN(pmem_real_off, SECTOR_SIZE),
+				       nr_sectors << SECTOR_SHIFT);
 		write_pmem(pmem_addr, page, page_off, len);
 	}
 
@@ -206,7 +220,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	bio_for_each_segment(bvec, bio, iter) {
 		if (op_is_write(bio_op(bio)))
 			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
-				iter.bi_sector, bvec.bv_len);
+				iter.bi_sector << SECTOR_SHIFT, bvec.bv_len);
 		else
 			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
 				iter.bi_sector, bvec.bv_len);
@@ -235,7 +249,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	blk_status_t rc;
 
 	if (op_is_write(op))
-		rc = pmem_do_write(pmem, page, 0, sector,
+		rc = pmem_do_write(pmem, page, 0, sector << SECTOR_SHIFT,
 			   hpage_nr_pages(page) * PAGE_SIZE);
 	else
 		rc = pmem_do_read(pmem, page, 0, sector,
-- 
2.20.1
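
For readers following the sector arithmetic in the first hunk: below is a
minimal, standalone userspace sketch (not kernel code; SECTOR_SHIFT,
SECTOR_SIZE and DIV_ROUND_UP are re-declared locally, and whole_sectors()
is a made-up helper) of how the new bounds computation picks out the whole
sectors contained in an arbitrary byte range before calling is_bad_pmem().
A sub-sector range gives sector_end <= sector_start, so the badblocks check
is skipped entirely, matching the new "if (sector_end > sector_start)" guard.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9
#define SECTOR_SIZE  (1u << SECTOR_SHIFT)
/* Round n up when dividing by a, mirroring the kernel's DIV_ROUND_UP. */
#define DIV_ROUND_UP(n, a) (((n) + (a) - 1) / (a))

/*
 * For an arbitrary byte range [pmem_off, pmem_off + len), report which
 * whole sectors lie completely inside it -- the same calculation the
 * patch performs before the is_bad_pmem() check.
 */
static void whole_sectors(uint64_t pmem_off, unsigned int len)
{
	uint64_t sector_start = DIV_ROUND_UP(pmem_off, SECTOR_SIZE);
	uint64_t sector_end = (pmem_off + len) >> SECTOR_SHIFT;

	if (sector_end > sector_start)
		printf("range [%llu, %llu): check sectors %llu..%llu\n",
		       (unsigned long long)pmem_off,
		       (unsigned long long)(pmem_off + len),
		       (unsigned long long)sector_start,
		       (unsigned long long)(sector_end - 1));
	else
		printf("range [%llu, %llu): no whole sector, check skipped\n",
		       (unsigned long long)pmem_off,
		       (unsigned long long)(pmem_off + len));
}

int main(void)
{
	whole_sectors(0, 4096);   /* sector aligned 4K: sectors 0..7 */
	whole_sectors(100, 1000); /* only sector 1 is fully covered  */
	whole_sectors(100, 200);  /* sub-sector range: nothing to do */
	return 0;
}

Built with a plain cc, the three calls show that an aligned 4K write checks
sectors 0..7, a 1000-byte write at offset 100 fully covers only sector 1,
and a 200-byte write at offset 100 covers no whole sector at all.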