On Tue, 2018-05-15 at 09:00 -0700, Matthew Wilcox wrote:
> diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
> index 025dc2d3f3de..cdf671c2af61 100644
> --- a/drivers/scsi/qla2xxx/qla_target.c
> +++ b/drivers/scsi/qla2xxx/qla_target.c
> @@ -3719,7 +3719,8 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
>  		return;
>  	}
>  	cmd->jiffies_at_free = get_jiffies_64();
> -	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
> +	sbitmap_queue_clear(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag,
> +	    cmd->se_cmd.map_cpu);
>  }
>  EXPORT_SYMBOL(qlt_free_cmd);

Please introduce functions in the target core for allocating and freeing a
tag instead of spreading the knowledge of how to allocate and free tags over
all target drivers.

> +int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
> +{
> +	int tag = -1;
> +	DEFINE_WAIT(wait);
> +	struct sbq_wait_state *ws;
> +
> +	if (state == TASK_RUNNING)
> +		return tag;
> +
> +	ws = &se_sess->sess_tag_pool.ws[0];
> +	for (;;) {
> +		prepare_to_wait_exclusive(&ws->wait, &wait, state);
> +		if (signal_pending_state(state, current))
> +			break;

This looks weird to me. Shouldn't target code ignore signals instead of
causing tag allocation to fail if a signal is received?

> +		schedule();
> +		tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
> +	}
> +
> +	finish_wait(&ws->wait, &wait);
> +	return tag;
> +}

Thanks,

Bart.
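P.S. To illustrate what I mean, the target core could hide the sbitmap
details behind something like the sketch below, so that qla2xxx, iSCSI and
the other fabric drivers never touch sess_tag_pool directly. The helper
names target_alloc_tag() / target_free_tag() are only placeholders, not an
existing API:

/*
 * Sketch only: possible target-core wrappers around the sbitmap_queue
 * tag pool introduced by this series. Helper names are placeholders.
 */
static inline int target_alloc_tag(struct se_session *se_sess,
				   unsigned int *cpup)
{
	/* Non-blocking; returns -1 if no tag is currently available. */
	return sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
}

static inline void target_free_tag(struct se_session *se_sess,
				   struct se_cmd *se_cmd)
{
	sbitmap_queue_clear(&se_sess->sess_tag_pool, se_cmd->map_tag,
			    se_cmd->map_cpu);
}

With helpers like these the qlt_free_cmd() hunk above would reduce to a
single target_free_tag(sess->se_sess, &cmd->se_cmd) call, and a sleeping
variant of the allocator could also absorb the wait loop quoted above.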