+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ query = kzalloc(sizeof(struct ufs_query_req), GFP_KERNEL);
+ if (!query) {
+ dev_err(hba->dev,
+ "%s: Failed allocating ufs_query_req instance\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ response = kzalloc(sizeof(struct ufs_query_res), GFP_KERNEL);
+ if (!response) {
+ dev_err(hba->dev,
+ "%s: Failed allocating ufs_query_res instance\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_free_query;
+ }
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ query->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ query->upiu_req.value = *attr_val;
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ query->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ query->upiu_req.opcode = opcode;
+ query->upiu_req.idn = idn;
+ query->upiu_req.index = index;
+ query->upiu_req.selector = selector;
+
+ /* Send query request */
+ err = ufshcd_send_query_request(hba, query, NULL, response);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+ __func__, opcode, idn, err);
+ goto out_free;
+ }
+
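+ /* hand the value from the device's response back to the caller */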
+ *attr_val = response->upiu_res.value;
+
+out_free:
+ kfree(response);
+out_free_query:
+ kfree(query);
+out:
+ return err;
+}
+
+/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
*
@@ -1842,6 +1937,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
*/
scsi_status = result & MASK_SCSI_STATUS;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
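+ /*
+ * The device may flag an exception event (e.g. urgent BKOPS) in the
+ * response UPIU; if so, handle it from process context.
+ */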
+ if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -1942,6 +2040,215 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
}
/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables the exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enables the corresponding exception event in the device so that the
+ * device can alert the host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allows the device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers,
+ * as the device is free to schedule its background operations at any time.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+
+ /* No need for the URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block device in doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback that the device may move into a critical state where
+ * it is no longer operable. Make sure to call ufshcd_enable_auto_bkops()
+ * whenever the host is idle so that BKOPS are managed effectively without
+ * any negative impact.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * With auto BKOPS disabled the host assists the device, so make sure
+ * the urgent BKOPS exception event is allowed and the device can
+ * signal when it needs background operations.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
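+ /* roll back the exception event enable done above */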
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+out:
+ return err;
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Reads the BKOPS status attribute from the device and, if it indicates
+ * a performance impact or worse, enables the fBackgroundOpsEn flag so the
+ * device can carry out background operations.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ int err;
+ u32 status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
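+ /* keep only the BKOPS status bits (lower nibble) */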
+ status &= 0xF;
+
+ /* handle only if status indicates performance impact or critical */
+ if (status >= BKOPS_STATUS_PERF_IMPACT)
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ return err;
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
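+ /* act only on exception events that the host has enabled */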
+ status &= hba->ee_ctrl_mask;
+ if (status & MASK_EE_URGENT_BKOPS) {
+ err = ufshcd_urgent_bkops(hba);
+ if (err)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+ }
+out:
+ return;
+}
+
+/**
* ufshcd_fatal_err_handler - handle fatal errors
* @hba: per adapter instance
*/
@@ -2252,6 +2559,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
if (ret)
goto out;
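+ /*
+ * Let the device manage BKOPS on its own until runtime PM resumes
+ * the host and takes over (see ufshcd_runtime_resume()).
+ */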
+ hba->auto_bkops_enabled = false;
+ ufshcd_enable_auto_bkops(hba);
scsi_scan_host(hba->host);
out:
return;
@@ -2316,6 +2625,34 @@ int ufshcd_resume(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_resume);
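+
+/*
+ * The three runtime PM helpers below are meant to be wired into the bus
+ * glue driver's dev_pm_ops runtime callbacks.  A minimal sketch, assuming
+ * a platform glue driver that stored the hba pointer with dev_set_drvdata()
+ * (the *_pltfrm_* names are illustrative, not part of this patch):
+ *
+ *    static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
+ *    {
+ *        return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+ *    }
+ *
+ *    static int ufshcd_pltfrm_runtime_resume(struct device *dev)
+ *    {
+ *        return ufshcd_runtime_resume(dev_get_drvdata(dev));
+ *    }
+ *
+ *    static int ufshcd_pltfrm_runtime_idle(struct device *dev)
+ *    {
+ *        return ufshcd_runtime_idle(dev_get_drvdata(dev));
+ *    }
+ *
+ *    static const struct dev_pm_ops ufshcd_pltfrm_pm_ops = {
+ *        SET_RUNTIME_PM_OPS(ufshcd_pltfrm_runtime_suspend,
+ *                           ufshcd_pltfrm_runtime_resume,
+ *                           ufshcd_pltfrm_runtime_idle)
+ *    };
+ */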
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ if (!hba)
+ return 0;
+
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations.
+ */
+ return ufshcd_enable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ if (!hba)
+ return 0;
+
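+ /*
+ * The host is about to start issuing requests, so take BKOPS control
+ * back from the device to keep command latencies consistent.
+ */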
+ return ufshcd_disable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
+
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
@@ -2406,6 +2743,7 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
/* Initialize work queues */
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c6aeb6d..6c9bd35 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -178,9 +178,12 @@ struct ufs_dev_cmd {
* @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
* @feh_workq: Work queue for fatal controller error handling
+ * @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @dev_cmd: ufs device management command information
+ * @auto_bkops_enabled: to track whether auto bkops is enabled in the device
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -218,15 +221,19 @@ struct ufs_hba {
u32 ufshcd_state;
u32 intr_mask;
+ u16 ee_ctrl_mask;
/* Work Queues */
struct work_struct feh_workq;
+ struct work_struct eeh_work;
/* HBA Errors */
u32 errors;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
+
+ bool auto_bkops_enabled;
};
#define ufshcd_writel(hba, val, reg) \
@@ -247,4 +254,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}
+extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
+extern int ufshcd_runtime_resume(struct ufs_hba *hba);
+extern int ufshcd_runtime_idle(struct ufs_hba *hba);
#endif /* End of Header */