asedeno.scripts.mit.edu Git - linux.git/commitdiff
scsi: lpfc: Change smp_processor_id() into raw_smp_processor_id()
author Bart Van Assche <bvanassche@acm.org>
Thu, 28 Mar 2019 18:06:22 +0000 (11:06 -0700)
committer Martin K. Petersen <martin.petersen@oracle.com>
Thu, 4 Apr 2019 03:11:36 +0000 (23:11 -0400)
This patch avoids a kernel warning that appears when smp_processor_id() is
called with preempt debugging enabled.

Cc: James Smart <james.smart@broadcom.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c

index be188843ce28c4bcfbfd95c16ad850d2095321c1..8a123ff9825020ebdc8c8f73bc850c02924fa7c4 100644 (file)
@@ -229,7 +229,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
        if (qhandle == NULL)
                return -ENOMEM;
 
-       qhandle->cpu_id = smp_processor_id();
+       qhandle->cpu_id = raw_smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
@@ -1143,7 +1143,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
                uint32_t cpu;
                idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT) {
                        if (lpfc_ncmd->cpu != cpu)
                                lpfc_printf_vlog(vport,
@@ -1561,7 +1561,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
                idx = lpfc_queue_info->index;
        } else {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
        }
 
@@ -1641,7 +1641,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
        if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT) {
                        lpfc_ncmd->cpu = cpu;
                        if (idx != cpu)
index c125598089e27c13928766f41367d0b8fdcd7a76..d74bfd2644950804da7c8c6023c5b6537e3e9138 100644 (file)
@@ -433,7 +433,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
-       cpu = smp_processor_id();
+       cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
@@ -763,7 +763,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        }
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-               id = smp_processor_id();
+               id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -904,7 +904,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
 
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-               int id = smp_processor_id();
+               int id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (rsp->hwqid != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1118,7 +1118,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 
        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
-                        ctxp->oxid, ctxp->size, smp_processor_id());
+                        ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1594,7 +1594,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 
                lpfc_nvmeio_data(phba,
                        "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-                       xri, smp_processor_id(), 0);
+                       xri, raw_smp_processor_id(), 0);
 
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
@@ -1610,7 +1610,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
        spin_unlock_irqrestore(&phba->hbalock, iflag);
 
        lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-                        xri, smp_processor_id(), 1);
+                        xri, raw_smp_processor_id(), 1);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
@@ -2044,7 +2044,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
         * be empty, thus it would need to be replenished with the
         * context list from another CPU for this MRQ.
         */
-       current_cpu = smp_processor_id();
+       current_cpu = raw_smp_processor_id();
        current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
        spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
        if (current_infop->nvmet_ctx_list_cnt) {
@@ -2074,7 +2074,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 #endif
 
        lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
-                        oxid, size, smp_processor_id());
+                        oxid, size, raw_smp_processor_id());
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 
index a5ad6d972e1ba333c325171804deb23f608bc967..08644e1951cd672800c5380b4fd2b1300fb9474a 100644 (file)
@@ -688,7 +688,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        uint32_t sgl_size, cpu, idx;
        int tag;
 
-       cpu = smp_processor_id();
+       cpu = raw_smp_processor_id();
        if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
                tag = blk_mq_unique_tag(cmnd->request);
                idx = blk_mq_unique_tag_to_hwq(tag);
@@ -3669,7 +3669,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
                        phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
        }
@@ -4464,7 +4464,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT) {
                        struct lpfc_sli4_hdw_queue *hdwq =
                                        &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
index fc74344d6587d4fbb88efeb500090176f9826f68..2acda188b0dcfcf33c84fc9f0af4283a0ee7d42b 100644 (file)
@@ -13542,7 +13542,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0390 Cannot schedule soft IRQ "
                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                               cqid, cq->queue_id, smp_processor_id());
+                               cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14091,7 +14091,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0363 Cannot schedule soft IRQ "
                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                               cqid, cq->queue_id, smp_processor_id());
+                               cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14230,7 +14230,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 
        eqi = phba->sli4_hba.eq_info;
        icnt = this_cpu_inc_return(eqi->icnt);
-       fpeq->last_cpu = smp_processor_id();
+       fpeq->last_cpu = raw_smp_processor_id();
 
        if (icnt > LPFC_EQD_ISR_TRIGGER &&
            phba->cfg_irq_chann == 1 &&