On 12/4/20 6:51 AM, Brian King wrote: > On 12/2/20 8:07 PM, Tyrel Datwyler wrote: >> The logic for iterating over the Sub-CRQ responses is similar to that >> of the primary CRQ. Add the necessary handlers for processing those >> responses. >> >> Signed-off-by: Tyrel Datwyler <tyreld@xxxxxxxxxxxxx> >> --- >> drivers/scsi/ibmvscsi/ibmvfc.c | 80 ++++++++++++++++++++++++++++++++++ >> 1 file changed, 80 insertions(+) >> >> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c >> index e082935f56cf..b61ae1df21e5 100644 >> --- a/drivers/scsi/ibmvscsi/ibmvfc.c >> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c >> @@ -3381,6 +3381,86 @@ static int ibmvfc_toggle_scrq_irq(struct ibmvfc_sub_queue *scrq, int enable) >> return rc; >> } >> >> +static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) >> +{ >> + struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); >> + unsigned long flags; >> + >> + switch (crq->valid) { >> + case IBMVFC_CRQ_CMD_RSP: >> + break; >> + case IBMVFC_CRQ_XPORT_EVENT: >> + return; >> + default: >> + dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid); >> + return; >> + } >> + >> + /* The only kind of payload CRQs we should get are responses to >> + * things we send. Make sure this response is to something we >> + * actually sent >> + */ >> + if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) { >> + dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", >> + crq->ioba); >> + return; >> + } >> + >> + if (unlikely(atomic_read(&evt->free))) { >> + dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", >> + crq->ioba); >> + return; >> + } >> + >> + del_timer(&evt->timer); >> + list_del(&evt->queue); >> + ibmvfc_trc_end(evt); >> + spin_unlock_irqrestore(vhost->host->host_lock, flags); > > You can't do this here... 
You are grabbing the host lock in ibmvfc_drain_sub_crq > and saving the irqflags to a local in that function, then doing a spin_unlock_irqrestore > and restoring irqflags using an uninitialized local in this function... > > I'm assuming this will get sorted out with the locking changes we've been discussing off-list... Correct, moving to per-queue locks and flags stored in the queue struct. -Tyrel > > >> + evt->done(evt); >> + spin_lock_irqsave(vhost->host->host_lock, flags); >> +} >> + >> +static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_sub_queue *scrq) >> +{ >> + struct ibmvfc_crq *crq; >> + >> + crq = &scrq->msgs[scrq->cur].crq; >> + if (crq->valid & 0x80) { >> + if (++scrq->cur == scrq->size) >> + scrq->cur = 0; >> + rmb(); >> + } else >> + crq = NULL; >> + >> + return crq; >> +} >> + >> +static void ibmvfc_drain_sub_crq(struct ibmvfc_sub_queue *scrq) >> +{ >> + struct ibmvfc_crq *crq; >> + unsigned long flags; >> + int done = 0; >> + >> + spin_lock_irqsave(scrq->vhost->host->host_lock, flags); >> + while (!done) { >> + while ((crq = ibmvfc_next_scrq(scrq)) != NULL) { >> + ibmvfc_handle_scrq(crq, scrq->vhost); >> + crq->valid = 0; >> + wmb(); >> + } >> + >> + ibmvfc_toggle_scrq_irq(scrq, 1); >> + if ((crq = ibmvfc_next_scrq(scrq)) != NULL) { >> + ibmvfc_toggle_scrq_irq(scrq, 0); >> + ibmvfc_handle_scrq(crq, scrq->vhost); >> + crq->valid = 0; >> + wmb(); >> + } else >> + done = 1; >> + } >> + spin_unlock_irqrestore(scrq->vhost->host->host_lock, flags); >> +} >> + >> /** >> * ibmvfc_init_tgt - Set the next init job step for the target >> * @tgt: ibmvfc target struct >> > >