The driver provides overwatch of congestion management (CM) behavior by
maintaining a set of Rx I/O statistics. This information is also used
later when updating the CM statistics buffer.

Co-developed-by: Justin Tee <justin.tee@xxxxxxxxxxxx>
Signed-off-by: Justin Tee <justin.tee@xxxxxxxxxxxx>
Signed-off-by: James Smart <jsmart2021@xxxxxxxxx>
---
 drivers/scsi/lpfc/lpfc.h      | 21 +++++++++++++++
 drivers/scsi/lpfc/lpfc_init.c | 50 +++++++++++++++++++++++++++++++++++
 drivers/scsi/lpfc/lpfc_mem.c  |  4 +++
 drivers/scsi/lpfc/lpfc_scsi.c |  2 ++
 drivers/scsi/lpfc/lpfc_sli.c  | 15 +++++++++++
 5 files changed, 92 insertions(+)

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 298b908e9126..640075885540 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1546,6 +1546,12 @@ struct lpfc_hba {
 	u32 cgn_sig_freq;
 	u32 cgn_acqe_cnt;
 
+	/* RX monitor handling for CMF */
+	struct rxtable_entry *rxtable;	/* RX_monitor information */
+	atomic_t rxtable_idx_head;
+#define LPFC_RXMONITOR_TABLE_IN_USE	(LPFC_MAX_RXMONITOR_ENTRY + 73)
+	atomic_t rxtable_idx_tail;
+	atomic_t rx_max_read_cnt;	/* Maximum read bytes */
 	uint64_t rx_block_cnt;
 
 	/* Congestion parameters from flash */
@@ -1591,6 +1597,21 @@ struct lpfc_hba {
 	struct dbg_log_ent dbg_log[DBG_LOG_SZ];
 };
 
+#define LPFC_MAX_RXMONITOR_ENTRY	800
+struct rxtable_entry {
+	uint64_t total_bytes;	/* Total no of read bytes requested */
+	uint64_t rcv_bytes;	/* Total no of read bytes completed */
+	uint64_t avg_io_size;
+	uint64_t avg_io_latency;/* Average io latency in microseconds */
+	uint64_t max_read_cnt;	/* Maximum read bytes */
+	uint64_t max_bytes_per_interval;
+	uint32_t cmf_busy;
+	uint32_t cmf_info;	/* CMF_SYNC_WQE info */
+	uint32_t io_cnt;
+	uint32_t timer_utilization;
+	uint32_t timer_interval;
+};
+
 static inline struct Scsi_Host *
 lpfc_shost_from_vport(struct lpfc_vport *vport)
 {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 65d5ee9b3e30..b4b5326cb57c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5452,9 +5452,13 @@ lpfc_cmf_timer(struct hrtimer *timer)
 {
 	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
 					     cmf_timer);
+	struct rxtable_entry *entry;
 	uint32_t io_cnt;
+	uint32_t head, tail;
+	uint32_t busy, max_read;
 	uint64_t total, rcv, lat, mbpi;
 	int timer_interval = LPFC_CMF_INTERVAL;
+	uint32_t ms;
 	struct lpfc_cgn_stat *cgs;
 	int cpu;
 
@@ -5479,6 +5483,14 @@ lpfc_cmf_timer(struct hrtimer *timer)
 	 */
 	atomic_set(&phba->cmf_stop_io, 1);
 
+	/* First we need to calculate the actual ms between
+	 * the last timer interrupt and this one. We ask for
+	 * LPFC_CMF_INTERVAL, however the actual time may
+	 * vary depending on system overhead.
+	 */
+	ms = lpfc_calc_cmf_latency(phba);
+
+
 	/* Immediately after we calculate the time since the last
 	 * timer interrupt, set the start time for the next
 	 * interrupt
@@ -5525,6 +5537,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
 		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
 		atomic64_add(lat, &phba->cgn_latency_evt);
 	}
+	busy = atomic_xchg(&phba->cmf_busy, 0);
+	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
 
 	/* Calculate MBPI for the next timer interval */
 	if (mbpi) {
@@ -5539,6 +5553,42 @@ lpfc_cmf_timer(struct hrtimer *timer)
 			phba->cmf_max_bytes_per_interval = mbpi;
 	}
 
+	/* Save rxmonitor information for debug */
+	if (phba->rxtable) {
+		head = atomic_xchg(&phba->rxtable_idx_head,
+				   LPFC_RXMONITOR_TABLE_IN_USE);
+		entry = &phba->rxtable[head];
+		entry->total_bytes = total;
+		entry->rcv_bytes = rcv;
+		entry->cmf_busy = busy;
+		entry->cmf_info = phba->cmf_active_info;
+		if (io_cnt) {
+			entry->avg_io_latency = lat / io_cnt;
+			entry->avg_io_size = rcv / io_cnt;
+		} else {
+			entry->avg_io_latency = 0;
+			entry->avg_io_size = 0;
+		}
+		entry->max_read_cnt = max_read;
+		entry->io_cnt = io_cnt;
+		entry->max_bytes_per_interval = mbpi;
+		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+			entry->timer_utilization = phba->cmf_last_ts;
+		else
+			entry->timer_utilization = ms;
+		entry->timer_interval = ms;
+		phba->cmf_last_ts = 0;
+
+		/* Increment rxtable index */
+		head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+		tail = atomic_read(&phba->rxtable_idx_tail);
+		if (head == tail) {
+			tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+			atomic_set(&phba->rxtable_idx_tail, tail);
+		}
+		atomic_set(&phba->rxtable_idx_head, head);
+	}
+
 	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
 		/* If Monitor mode, check if we are oversubscribed
 		 * against the full line rate.
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index bbb181ab334b..7cb9f4b52b49 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -344,6 +344,10 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
 		phba->cgn_i = NULL;
 	}
 
+	/* Free RX table */
+	kfree(phba->rxtable);
+	phba->rxtable = NULL;
+
 	/* Free the iocb lookup array */
 	kfree(psli->iocbq_lookup);
 	psli->iocbq_lookup = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 11f26582e68c..e85c911f3b6c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3981,6 +3981,8 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
 			atomic_inc(&phba->cmf_busy);
 			return -EBUSY;
 		}
+		if (size > atomic_read(&phba->rx_max_read_cnt))
+			atomic_set(&phba->rx_max_read_cnt, size);
 	}
 
 	cgs = this_cpu_ptr(phba->cmf_stat);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6d256a509ea3..3dfb90a04783 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8066,6 +8066,21 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
 	atomic64_set(&phba->cgn_latency_evt, 0);
 
 	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+	/* Allocate RX Monitor Buffer */
+	if (!phba->rxtable) {
+		phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
+					      sizeof(struct rxtable_entry),
+					      GFP_KERNEL);
+		if (!phba->rxtable) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2644 Failed to alloc memory "
+					"for RX Monitor Buffer\n");
+			return -ENOMEM;
+		}
+	}
+	atomic_set(&phba->rxtable_idx_head, 0);
+	atomic_set(&phba->rxtable_idx_tail, 0);
 	return 0;
 }
-- 
2.26.2
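
For readers less familiar with the indexing added in lpfc_cmf_timer(): the
rxtable behaves like an overwrite-oldest ring buffer. head advances once per
CMF timer interval, and when it would collide with tail, tail is pushed
forward so the newest samples are retained and the oldest is discarded. The
standalone user-space sketch below models only that indexing; it is not
driver code. Plain integers stand in for the driver's atomics, and the
LPFC_RXMONITOR_TABLE_IN_USE sentinel (which the timer swaps into
rxtable_idx_head, an out-of-range value, while it fills the current entry) is
omitted since this model is single-threaded.

    /* Illustrative sketch only: models the rxtable ring indexing above. */
    #include <stdio.h>

    #define MAX_ENTRY 800u  /* stands in for LPFC_MAX_RXMONITOR_ENTRY */

    struct sample {
            unsigned long long total_bytes;
            unsigned int interval_ms;
    };

    static struct sample table[MAX_ENTRY];
    static unsigned int head, tail;

    /* Record one timer interval's worth of statistics. */
    static void record_sample(unsigned long long bytes, unsigned int ms)
    {
            table[head].total_bytes = bytes;
            table[head].interval_ms = ms;

            /* Advance head; if it catches tail, drop the oldest entry. */
            head = (head + 1) % MAX_ENTRY;
            if (head == tail)
                    tail = (tail + 1) % MAX_ENTRY;
    }

    int main(void)
    {
            /* Simulate more intervals than the table can hold. */
            for (unsigned int i = 0; i < MAX_ENTRY + 5; i++)
                    record_sample(1000ULL * i, 100);

            /* Walk tail -> head: oldest to newest surviving samples. */
            unsigned int idx = tail, count = 0;
            while (idx != head) {
                    count++;
                    idx = (idx + 1) % MAX_ENTRY;
            }
            printf("retained %u of %u samples\n", count, MAX_ENTRY + 5);
            return 0;
    }

With this scheme at most LPFC_MAX_RXMONITOR_ENTRY - 1 entries are readable at
any time, because head == tail is reserved as the empty/wrapped boundary; a
consumer walking from tail to head therefore always sees samples oldest-first.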