On 2021/06/25 2:23, Niklas Cassel wrote:
> From: Niklas Cassel <niklas.cassel@xxxxxxx>
>
> Create a local zbdi variable for f->zbd_info for the following functions:
> zbd_open_zone(), zbd_convert_to_open_zone(), and zbd_adjust_block().
>
> This avoids unnecessary indirections.
>
> No functional change intended.
>
> Signed-off-by: Niklas Cassel <niklas.cassel@xxxxxxx>
> ---
>  zbd.c | 69 +++++++++++++++++++++++++++++++----------------------
>  1 file changed, 36 insertions(+), 33 deletions(-)
>
> diff --git a/zbd.c b/zbd.c
> index 8e99eb95..f09b0ee6 100644
> --- a/zbd.c
> +++ b/zbd.c
> @@ -1114,6 +1114,7 @@ static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
>  			  uint32_t zone_idx)
>  {
>  	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
> +	struct zoned_block_device_info *zbdi = f->zbd_info;
>  	struct fio_zone_info *z = get_zone(f, zone_idx);
>  	bool res = true;
>
> @@ -1127,7 +1128,7 @@ static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
>  	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
>  		return false;
>
> -	pthread_mutex_lock(&f->zbd_info->mutex);
> +	pthread_mutex_lock(&zbdi->mutex);
>  	if (is_zone_open(td, f, zone_idx)) {
>  		/*
>  		 * If the zone is already open and going to be full by writes
> @@ -1142,16 +1143,16 @@ static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
>  	if (td->o.job_max_open_zones > 0 &&
>  	    td->num_open_zones >= td->o.job_max_open_zones)
>  		goto out;
> -	if (f->zbd_info->num_open_zones >= f->zbd_info->max_open_zones)
> +	if (zbdi->num_open_zones >= zbdi->max_open_zones)
>  		goto out;
>  	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
> -	f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
> +	zbdi->open_zones[zbdi->num_open_zones++] = zone_idx;
>  	td->num_open_zones++;
>  	z->open = 1;
>  	res = true;
>
>  out:
> -	pthread_mutex_unlock(&f->zbd_info->mutex);
> +	pthread_mutex_unlock(&zbdi->mutex);
>  	return res;
>  }
>
> @@ -1175,6 +1176,7 @@ static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
>  {
>  	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
>  	struct fio_file *f = io_u->file;
> +	struct zoned_block_device_info *zbdi = f->zbd_info;
>  	struct fio_zone_info *z;
>  	unsigned int open_zone_idx = -1;
>  	uint32_t zone_idx, new_zone_idx;
> @@ -1185,10 +1187,10 @@ static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
>
>  	if (td->o.max_open_zones || td->o.job_max_open_zones) {
>  		/*
> -		 * This statement accesses f->zbd_info->open_zones[] on purpose
> +		 * This statement accesses zbdi->open_zones[] on purpose
>  		 * without locking.
>  		 */
> -		zone_idx = f->zbd_info->open_zones[pick_random_zone_idx(f, io_u)];
> +		zone_idx = zbdi->open_zones[pick_random_zone_idx(f, io_u)];
>  	} else {
>  		zone_idx = zbd_zone_idx(f, io_u->offset);
>  	}
> @@ -1200,9 +1202,9 @@ static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
>  	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);
>
>  	/*
> -	 * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
> +	 * Since z->mutex is the outer lock and zbdi->mutex the inner
>  	 * lock it can happen that the state of the zone with index zone_idx
> -	 * has changed after 'z' has been assigned and before f->zbd_info->mutex
> +	 * has changed after 'z' has been assigned and before zbdi->mutex
>  	 * has been obtained. Hence the loop.
>  	 */
>  	for (;;) {
> @@ -1211,12 +1213,12 @@ static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
>  		z = get_zone(f, zone_idx);
>  		if (z->has_wp)
>  			zone_lock(td, f, z);
> -		pthread_mutex_lock(&f->zbd_info->mutex);
> +		pthread_mutex_lock(&zbdi->mutex);
>  		if (z->has_wp) {
>  			if (z->cond != ZBD_ZONE_COND_OFFLINE &&
>  			    td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
>  				goto examine_zone;
> -			if (f->zbd_info->num_open_zones == 0) {
> +			if (zbdi->num_open_zones == 0) {
>  				dprint(FD_ZBD, "%s(%s): no zones are open\n",
>  				       __func__, f->file_name);
>  				goto open_other_zone;
> @@ -1230,14 +1232,14 @@ static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
>  			 */
>  			open_zone_idx = pick_random_zone_idx(f, io_u);
>  			assert(!open_zone_idx ||
> -			       open_zone_idx < f->zbd_info->num_open_zones);
> +			       open_zone_idx < zbdi->num_open_zones);
>  			tmp_idx = open_zone_idx;
> -			for (i = 0; i < f->zbd_info->num_open_zones; i++) {
> +			for (i = 0; i < zbdi->num_open_zones; i++) {
>  				uint32_t tmpz;
>
> -				if (tmp_idx >= f->zbd_info->num_open_zones)
> +				if (tmp_idx >= zbdi->num_open_zones)
>  					tmp_idx = 0;
> -				tmpz = f->zbd_info->open_zones[tmp_idx];
> +				tmpz = zbdi->open_zones[tmp_idx];
>  				if (f->min_zone <= tmpz && tmpz < f->max_zone) {
>  					open_zone_idx = tmp_idx;
>  					goto found_candidate_zone;
> @@ -1248,39 +1250,39 @@ static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
>
>  		dprint(FD_ZBD, "%s(%s): no candidate zone\n",
>  			__func__, f->file_name);
> -		pthread_mutex_unlock(&f->zbd_info->mutex);
> +		pthread_mutex_unlock(&zbdi->mutex);
>  		if (z->has_wp)
>  			zone_unlock(z);
>  		return NULL;
>
>  found_candidate_zone:
> -		new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
> +		new_zone_idx = zbdi->open_zones[open_zone_idx];
>  		if (new_zone_idx == zone_idx)
>  			break;
>  		zone_idx = new_zone_idx;
> -		pthread_mutex_unlock(&f->zbd_info->mutex);
> +		pthread_mutex_unlock(&zbdi->mutex);
>  		if (z->has_wp)
>  			zone_unlock(z);
>  	}
>
> -	/* Both z->mutex and f->zbd_info->mutex are held. */
> +	/* Both z->mutex and zbdi->mutex are held. */
>
>  examine_zone:
>  	if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
> -		pthread_mutex_unlock(&f->zbd_info->mutex);
> +		pthread_mutex_unlock(&zbdi->mutex);
>  		goto out;
>  	}
>
>  open_other_zone:
>  	/* Check if number of open zones reaches one of limits. */
>  	wait_zone_close =
> -		f->zbd_info->num_open_zones == f->max_zone - f->min_zone ||
> +		zbdi->num_open_zones == f->max_zone - f->min_zone ||
>  		(td->o.max_open_zones &&
> -		 f->zbd_info->num_open_zones == td->o.max_open_zones) ||
> +		 zbdi->num_open_zones == td->o.max_open_zones) ||
>  		(td->o.job_max_open_zones &&
>  		 td->num_open_zones == td->o.job_max_open_zones);
>
> -	pthread_mutex_unlock(&f->zbd_info->mutex);
> +	pthread_mutex_unlock(&zbdi->mutex);
>
>  	/* Only z->mutex is held. */
>
> @@ -1295,7 +1297,7 @@ open_other_zone:
>  	}
>
>  	/* Zone 'z' is full, so try to open a new zone. */
> -	for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
> +	for (i = f->io_size / zbdi->zone_size; i > 0; i--) {
>  		zone_idx++;
>  		if (z->has_wp)
>  			zone_unlock(z);
> @@ -1318,12 +1320,12 @@ open_other_zone:
>  	/* Only z->mutex is held. */
>
>  	/* Check whether the write fits in any of the already opened zones. */
> -	pthread_mutex_lock(&f->zbd_info->mutex);
> -	for (i = 0; i < f->zbd_info->num_open_zones; i++) {
> -		zone_idx = f->zbd_info->open_zones[i];
> +	pthread_mutex_lock(&zbdi->mutex);
> +	for (i = 0; i < zbdi->num_open_zones; i++) {
> +		zone_idx = zbdi->open_zones[i];
>  		if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
>  			continue;
> -		pthread_mutex_unlock(&f->zbd_info->mutex);
> +		pthread_mutex_unlock(&zbdi->mutex);
>  		zone_unlock(z);
>
>  		z = get_zone(f, zone_idx);
> @@ -1331,9 +1333,9 @@ open_other_zone:
>  		zone_lock(td, f, z);
>  		if (z->wp + min_bs <= zbd_zone_capacity_end(z))
>  			goto out;
> -		pthread_mutex_lock(&f->zbd_info->mutex);
> +		pthread_mutex_lock(&zbdi->mutex);
>  	}
> -	pthread_mutex_unlock(&f->zbd_info->mutex);
> +	pthread_mutex_unlock(&zbdi->mutex);
>  	zone_unlock(z);
>  	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
>  	       f->file_name);
> @@ -1683,6 +1685,7 @@ enum fio_ddir zbd_adjust_ddir(struct thread_data *td, struct io_u *io_u,
>  enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
>  {
>  	struct fio_file *f = io_u->file;
> +	struct zoned_block_device_info *zbdi = f->zbd_info;
>  	uint32_t zone_idx_b;
>  	struct fio_zone_info *zb, *zl, *orig_zb;
>  	uint32_t orig_len = io_u->buflen;
> @@ -1690,7 +1693,7 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
>  	uint64_t new_len;
>  	int64_t range;
>
> -	assert(f->zbd_info);
> +	assert(zbdi);
>  	assert(min_bs);
>  	assert(is_valid_offset(f, io_u->offset));
>  	assert(io_u->buflen);
> @@ -1802,12 +1805,12 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
>  		assert(io_u->offset + io_u->buflen <= zb->wp);
>  		goto accept;
>  	case DDIR_WRITE:
> -		if (io_u->buflen > f->zbd_info->zone_size) {
> +		if (io_u->buflen > zbdi->zone_size) {
>  			td_verror(td, EINVAL, "I/O buflen exceeds zone size");
>  			dprint(FD_IO,
>  			       "%s: I/O buflen %llu exceeds zone size %llu\n",
>  			       f->file_name, io_u->buflen,
> -			       (unsigned long long) f->zbd_info->zone_size);
> +			       (unsigned long long) zbdi->zone_size);
>  			goto eof;
>  		}
>  		if (!zbd_open_zone(td, f, zone_idx_b)) {
> @@ -1822,7 +1825,7 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
>  		}
>  		/* Check whether the zone reset threshold has been exceeded */
>  		if (td->o.zrf.u.f) {
> -			if (f->zbd_info->wp_sectors_with_data >=
> +			if (zbdi->wp_sectors_with_data >=
>  			    f->io_size * td->o.zrt.u.f &&
>  			    zbd_dec_and_reset_write_cnt(td, f)) {
>  				zb->reset_zone = 1;
>

Looks good.

Reviewed-by: Damien Le Moal <damien.lemoal@xxxxxxx>

--
Damien Le Moal
Western Digital Research
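
For anyone skimming the diff above: every hunk applies the same small pattern, caching f->zbd_info once in a local pointer and then using that pointer for the rest of the function. A minimal sketch of that pattern follows; it is illustrative only and is not fio code, the struct and field names below are made-up stand-ins for fio's fio_file and zoned_block_device_info.

/*
 * Illustrative sketch only -- not fio code. Struct and field names are
 * simplified stand-ins for fio's fio_file / zoned_block_device_info.
 */
#include <pthread.h>

struct zbd_info_sketch {
	pthread_mutex_t mutex;
	unsigned int num_open_zones;
};

struct file_sketch {
	struct zbd_info_sketch *zbd_info;
};

/* Before: every access dereferences f->zbd_info again. */
static void bump_before(struct file_sketch *f)
{
	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->num_open_zones++;
	pthread_mutex_unlock(&f->zbd_info->mutex);
}

/* After: cache the pointer once and reuse it, as the patch does with zbdi. */
static void bump_after(struct file_sketch *f)
{
	struct zbd_info_sketch *zbdi = f->zbd_info;

	pthread_mutex_lock(&zbdi->mutex);
	zbdi->num_open_zones++;
	pthread_mutex_unlock(&zbdi->mutex);
}

Behavior is unchanged; the gain is mostly shorter, more readable lines, plus the compiler no longer has to re-load f->zbd_info in places where it cannot prove the pointer is unmodified.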