On Tue, May 14, 2024 at 01:53:17PM -0400, John Meneghini wrote:
> @@ -130,6 +133,7 @@ void nvme_mpath_start_request(struct request *rq)
> 	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
> 		return;
> 
> +	atomic_inc(&ns->ctrl->nr_active);

Why skip passthrough and stats?

And I think you should squash the follow-up patch that constrains the
atomics to the queue-depth path selector.

> +static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
> +{
> +	struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
> +	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
> +	unsigned int depth;
> +
> +	list_for_each_entry_rcu(ns, &head->list, siblings) {
> +		if (nvme_path_is_disabled(ns))
> +			continue;
> +
> +		depth = atomic_read(&ns->ctrl->nr_active);
> +
> +		switch (ns->ana_state) {
> +		case NVME_ANA_OPTIMIZED:
> +			if (depth < min_depth_opt) {
> +				min_depth_opt = depth;
> +				best_opt = ns;
> +			}
> +			break;
> +
> +		case NVME_ANA_NONOPTIMIZED:
> +			if (depth < min_depth_nonopt) {
> +				min_depth_nonopt = depth;
> +				best_nonopt = ns;
> +			}
> +			break;
> +		default:
> +			break;
> +		}
> +	}
> +

I think you can do the atomic_inc here so you don't have to check the
io policy a 2nd time.

> +	return best_opt ? best_opt : best_nonopt;
> +}
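
To spell out that last suggestion, what I have in mind is roughly the
sketch below: let the selector itself bump nr_active on the path it
returns, instead of re-checking the io policy in
nvme_mpath_start_request(). This is just an untested illustration
reusing the names from your patch (nr_active, best_opt/best_nonopt);
the start/end request paths would need the matching change so the
queue-depth policy doesn't account the request twice.

static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
{
	struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *best, *ns;
	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
	unsigned int depth;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		depth = atomic_read(&ns->ctrl->nr_active);

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (depth < min_depth_opt) {
				min_depth_opt = depth;
				best_opt = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (depth < min_depth_nonopt) {
				min_depth_nonopt = depth;
				best_nonopt = ns;
			}
			break;
		default:
			break;
		}
	}

	best = best_opt ? best_opt : best_nonopt;
	if (best)
		/* account the chosen path once, at selection time */
		atomic_inc(&best->ctrl->nr_active);
	return best;
}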