On 4/16/22 05:17, Bart Van Assche wrote: > Deriving the meaning of the nr_zones, rev_nr_zones, zone_blocks and > rev_zone_blocks member variables requires careful analysis of the source > code. Make the meaning of these member variables easier to understand by > introducing struct zoned_disk_info. > > Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx> > --- > drivers/scsi/sd.h | 18 +++++++++++++---- > drivers/scsi/sd_zbc.c | 47 ++++++++++++++++++++----------------------- > 2 files changed, 36 insertions(+), 29 deletions(-) > > diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h > index 4849cbe771a7..2e983578a699 100644 > --- a/drivers/scsi/sd.h > +++ b/drivers/scsi/sd.h > @@ -67,6 +67,16 @@ enum { > SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */ > }; > > +/** > + * struct zoned_disk_info - Properties of a zoned SCSI disk. Nit: you could say "ZBC SCSI device" to be more in line with standards vocabulary here instead of "zoned SCSI disk". > + * @nr_zones: number of zones. > + * @zone_blocks: number of logical blocks per zone. > + */ > +struct zoned_disk_info { > + u32 nr_zones; > + u32 zone_blocks; > +}; > + > struct scsi_disk { > struct scsi_device *device; > > @@ -78,10 +88,10 @@ struct scsi_disk { > struct gendisk *disk; > struct opal_dev *opal_dev; > #ifdef CONFIG_BLK_DEV_ZONED > - u32 nr_zones; > - u32 rev_nr_zones; > - u32 zone_blocks; > - u32 rev_zone_blocks; > + /* Updated during revalidation before the gendisk capacity is known. */ > + struct zoned_disk_info early_zone_info; > + /* Updated during revalidation after the gendisk capacity is known. */ > + struct zoned_disk_info zone_info; > u32 zones_optimal_open; > u32 zones_optimal_nonseq; > u32 zones_max_open; Nit: It would be nice to pack everything under the #ifdef into the same structure... 
> diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c > index c1f295859b27..fe46b4b9913a 100644 > --- a/drivers/scsi/sd_zbc.c > +++ b/drivers/scsi/sd_zbc.c > @@ -180,7 +180,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, > * sure that the allocated buffer can always be mapped by limiting the > * number of pages allocated to the HBA max segments limit. > */ > - nr_zones = min(nr_zones, sdkp->nr_zones); > + nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); > bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); > bufsize = min_t(size_t, bufsize, > queue_max_hw_sectors(q) << SECTOR_SHIFT); > @@ -205,7 +205,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, > */ > static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) > { > - return logical_to_sectors(sdkp->device, sdkp->zone_blocks); > + return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks); > } > > /** > @@ -321,14 +321,14 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) > sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); > > spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); > - for (zno = 0; zno < sdkp->nr_zones; zno++) { > + for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) { > if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) > continue; > > spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); > ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, > SD_BUF_SIZE, > - zno * sdkp->zone_blocks, true); > + zno * sdkp->zone_info.zone_blocks, true); > spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); > if (!ret) > sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, > @@ -395,7 +395,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, > break; > default: > wp_offset = sectors_to_logical(sdkp->device, wp_offset); > - if (wp_offset + nr_blocks > sdkp->zone_blocks) { > + if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) { > ret = 
BLK_STS_IOERR; > break; > } > @@ -524,7 +524,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, > break; > case REQ_OP_ZONE_RESET_ALL: > memset(sdkp->zones_wp_offset, 0, > - sdkp->nr_zones * sizeof(unsigned int)); > + sdkp->zone_info.nr_zones * sizeof(unsigned int)); > break; > default: > break; > @@ -681,16 +681,16 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp) > if (!sd_is_zoned(sdkp) || !sdkp->capacity) > return; > > - if (sdkp->capacity & (sdkp->zone_blocks - 1)) > + if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1)) > sd_printk(KERN_NOTICE, sdkp, > "%u zones of %u logical blocks + 1 runt zone\n", > - sdkp->nr_zones - 1, > - sdkp->zone_blocks); > + sdkp->zone_info.nr_zones - 1, > + sdkp->zone_info.zone_blocks); > else > sd_printk(KERN_NOTICE, sdkp, > "%u zones of %u logical blocks\n", > - sdkp->nr_zones, > - sdkp->zone_blocks); > + sdkp->zone_info.nr_zones, > + sdkp->zone_info.zone_blocks); > } > > static int sd_zbc_init_disk(struct scsi_disk *sdkp) > @@ -717,10 +717,8 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp) > kfree(sdkp->zone_wp_update_buf); > sdkp->zone_wp_update_buf = NULL; > > - sdkp->nr_zones = 0; > - sdkp->rev_nr_zones = 0; > - sdkp->zone_blocks = 0; > - sdkp->rev_zone_blocks = 0; > + sdkp->early_zone_info = (struct zoned_disk_info){ }; > + sdkp->zone_info = (struct zoned_disk_info){ }; > > mutex_unlock(&sdkp->rev_mutex); > } > @@ -747,8 +745,8 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) > { > struct gendisk *disk = sdkp->disk; > struct request_queue *q = disk->queue; > - u32 zone_blocks = sdkp->rev_zone_blocks; > - unsigned int nr_zones = sdkp->rev_nr_zones; > + u32 zone_blocks = sdkp->early_zone_info.zone_blocks; > + unsigned int nr_zones = sdkp->early_zone_info.nr_zones; > u32 max_append; > int ret = 0; > unsigned int flags; > @@ -779,14 +777,14 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) > */ > mutex_lock(&sdkp->rev_mutex); > > - if (sdkp->zone_blocks == zone_blocks && > - 
sdkp->nr_zones == nr_zones && > + if (sdkp->zone_info.zone_blocks == zone_blocks && > + sdkp->zone_info.nr_zones == nr_zones && > disk->queue->nr_zones == nr_zones) > goto unlock; > > flags = memalloc_noio_save(); > - sdkp->zone_blocks = zone_blocks; > - sdkp->nr_zones = nr_zones; > + sdkp->zone_info.zone_blocks = zone_blocks; > + sdkp->zone_info.nr_zones = nr_zones; > sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL); > if (!sdkp->rev_wp_offset) { > ret = -ENOMEM; > @@ -801,8 +799,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) > sdkp->rev_wp_offset = NULL; > > if (ret) { > - sdkp->zone_blocks = 0; > - sdkp->nr_zones = 0; > + sdkp->zone_info = (struct zoned_disk_info){ }; > sdkp->capacity = 0; > goto unlock; > } > @@ -888,8 +885,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) > if (blk_queue_zoned_model(q) == BLK_ZONED_HM) > blk_queue_zone_write_granularity(q, sdkp->physical_block_size); > > - sdkp->rev_nr_zones = nr_zones; > - sdkp->rev_zone_blocks = zone_blocks; > + sdkp->early_zone_info.nr_zones = nr_zones; > + sdkp->early_zone_info.zone_blocks = zone_blocks; > > return 0; > With or without the nit addressed, Reviewed-by: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx> -- Damien Le Moal Western Digital Research