Avoid triggering a kernel warning when smp_processor_id() is called from
preemptible context with preempt debugging (CONFIG_DEBUG_PREEMPT) enabled.
Do this by switching the affected call sites to raw_smp_processor_id().

Cc: James Smart <james.smart@xxxxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 drivers/scsi/lpfc/lpfc_nvme.c  |  8 ++++----
 drivers/scsi/lpfc/lpfc_nvmet.c | 16 ++++++++--------
 drivers/scsi/lpfc/lpfc_scsi.c  |  6 +++---
 drivers/scsi/lpfc/lpfc_sli.c   |  6 +++---
 4 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index be188843ce28..8a123ff98250 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -229,7 +229,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 	if (qhandle == NULL)
 		return -ENOMEM;
 
-	qhandle->cpu_id = smp_processor_id();
+	qhandle->cpu_id = raw_smp_processor_id();
 	qhandle->qidx = qidx;
 	/*
 	 * NVME qidx == 0 is the admin queue, so both admin queue
@@ -1143,7 +1143,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
 		uint32_t cpu;
 		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT) {
 			if (lpfc_ncmd->cpu != cpu)
 				lpfc_printf_vlog(vport,
@@ -1561,7 +1561,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
 		idx = lpfc_queue_info->index;
 	} else {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
 	}
@@ -1641,7 +1641,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
 	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT) {
 			lpfc_ncmd->cpu = cpu;
 			if (idx != cpu)
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c125598089e2..d74bfd264495 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -433,7 +433,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	 * Use the CPU context list, from the MRQ the IO was received on
 	 * (ctxp->idx), to save context structure.
 	 */
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
@@ -763,7 +763,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	}
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
+		id = raw_smp_processor_id();
 		if (id < LPFC_CHECK_CPU_CNT) {
 			if (ctxp->cpu != id)
 				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -904,7 +904,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
 
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		int id = smp_processor_id();
+		int id = raw_smp_processor_id();
 		if (id < LPFC_CHECK_CPU_CNT) {
 			if (rsp->hwqid != id)
 				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1118,7 +1118,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
-			 ctxp->oxid, ctxp->size, smp_processor_id());
+			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
 	if (!nvmebuf) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1594,7 +1594,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 		lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-				 xri, smp_processor_id(), 0);
+				 xri, raw_smp_processor_id(), 0);
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
@@ -1610,7 +1610,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-			 xri, smp_processor_id(), 1);
+			 xri, raw_smp_processor_id(), 1);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
@@ -2044,7 +2044,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	 * be empty, thus it would need to be replenished with the
 	 * context list from another CPU for this MRQ.
 	 */
-	current_cpu = smp_processor_id();
+	current_cpu = raw_smp_processor_id();
 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
 	if (current_infop->nvmet_ctx_list_cnt) {
@@ -2074,7 +2074,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 #endif
 
 	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
-			 oxid, size, smp_processor_id());
+			 oxid, size, raw_smp_processor_id());
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a5ad6d972e1b..08644e1951cd 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -688,7 +688,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	uint32_t sgl_size, cpu, idx;
 	int tag;
 
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
 		tag = blk_mq_unique_tag(cmnd->request);
 		idx = blk_mq_unique_tag_to_hwq(tag);
@@ -3669,7 +3669,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
 			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
 	}
@@ -4464,7 +4464,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT) {
 			struct lpfc_sli4_hdw_queue *hdwq =
 					&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fc74344d6587..2acda188b0dc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13542,7 +13542,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0390 Cannot schedule soft IRQ "
 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+				cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14091,7 +14091,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0363 Cannot schedule soft IRQ "
 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+				cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14230,7 +14230,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 	eqi = phba->sli4_hba.eq_info;
 	icnt = this_cpu_inc_return(eqi->icnt);
-	fpeq->last_cpu = smp_processor_id();
+	fpeq->last_cpu = raw_smp_processor_id();
 
 	if (icnt > LPFC_EQD_ISR_TRIGGER &&
 	    phba->cfg_irq_chann == 1 &&
-- 
2.21.0.196.g041f5ea1cf98
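
For context, a minimal, hypothetical sketch follows (module name, messages and
helpers below are invented for illustration; they are not part of lpfc or of
this patch). With CONFIG_DEBUG_PREEMPT=y, smp_processor_id() complains when
called from preemptible context, while raw_smp_processor_id() returns the same
value without the check. The unchecked variant is acceptable when the CPU
number is only used as a hint, e.g. for queue selection or per-CPU statistics,
which appears to be the case for the call sites changed above.

/*
 * Illustrative only: shows the difference between smp_processor_id(),
 * raw_smp_processor_id() and get_cpu()/put_cpu() under CONFIG_DEBUG_PREEMPT.
 */
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/preempt.h>

static int __init cpu_hint_demo_init(void)
{
	unsigned int cpu;

	/*
	 * smp_processor_id() here would trigger
	 * "BUG: using smp_processor_id() in preemptible code"
	 * with CONFIG_DEBUG_PREEMPT=y, because module init runs
	 * in preemptible context.
	 */

	/* No preemption check; the value is only a hint. */
	cpu = raw_smp_processor_id();
	pr_info("cpu_hint_demo: submitted from CPU %u (hint only)\n", cpu);

	/*
	 * If the code must keep running on the reported CPU, disable
	 * preemption instead of silencing the check.
	 */
	cpu = get_cpu();	/* disables preemption */
	pr_info("cpu_hint_demo: pinned to CPU %u\n", cpu);
	put_cpu();		/* re-enables preemption */

	return 0;
}

static void __exit cpu_hint_demo_exit(void)
{
}

module_init(cpu_hint_demo_init);
module_exit(cpu_hint_demo_exit);
MODULE_LICENSE("GPL");

If a caller actually depended on staying on the reported CPU, the usual fix
would be get_cpu()/put_cpu() (or another way of disabling preemption) rather
than switching to raw_smp_processor_id().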