Re: [PATCH v9 3/3] soc: qcom: rpmh: Invoke rpmh_flush() for dirty caches

On 2/29/2020 3:18 AM, Evan Green wrote:
Hi Maulik,
Thanks for spinning this so promptly.

On Fri, Feb 28, 2020 at 3:38 AM Maulik Shah <mkshah@xxxxxxxxxxxxxx> wrote:
Add changes to invoke rpmh_flush() from within cache_lock when the data
in the cache is dirty.

This is done only if OSI is not supported in PSCI. If OSI is supported,
rpmh_flush() can get invoked when the last CPU is going to power collapse
into the deepest low power mode.

Also remove "depends on COMPILE_TEST" for the Kconfig option QCOM_RPMH so
the driver is only compiled for arm64, which supports the
psci_has_osi_support() API.

Signed-off-by: Maulik Shah <mkshah@xxxxxxxxxxxxxx>
Reviewed-by: Srinivas Rao L <lsrao@xxxxxxxxxxxxxx>
---
  drivers/soc/qcom/Kconfig |  2 +-
  drivers/soc/qcom/rpmh.c  | 33 ++++++++++++++++++++++-----------
  2 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index d0a73e7..2e581bc 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -105,7 +105,7 @@ config QCOM_RMTFS_MEM

  config QCOM_RPMH
         bool "Qualcomm RPM-Hardened (RPMH) Communication"
-       depends on ARCH_QCOM && ARM64 || COMPILE_TEST
+       depends on ARCH_QCOM && ARM64
         help
           Support for communication with the hardened-RPM blocks in
           Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index f28afe4..6a5a60c 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -12,6 +12,7 @@
  #include <linux/module.h>
  #include <linux/of.h>
  #include <linux/platform_device.h>
+#include <linux/psci.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/types.h>
@@ -158,6 +159,13 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
         }

  unlock:
+       if (ctrlr->dirty && !psci_has_osi_support()) {
+               if (rpmh_flush(ctrlr)) {
+                       spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
         spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

         return req;
@@ -285,26 +293,35 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
  }
  EXPORT_SYMBOL(rpmh_write);

-static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+static int cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
  {
         unsigned long flags;

         spin_lock_irqsave(&ctrlr->cache_lock, flags);
+
         list_add_tail(&req->list, &ctrlr->batch_cache);
         ctrlr->dirty = true;
+
+       if (!psci_has_osi_support()) {
+               if (rpmh_flush(ctrlr)) {
+                       spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+                       return -EINVAL;
+               }
+       }
+
         spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+       return 0;
  }

  static int flush_batch(struct rpmh_ctrlr *ctrlr)
  {
         struct batch_cache_req *req;
         const struct rpmh_request *rpm_msg;
-       unsigned long flags;
         int ret = 0;
         int i;

         /* Send Sleep/Wake requests to the controller, expect no response */
-       spin_lock_irqsave(&ctrlr->cache_lock, flags);
         list_for_each_entry(req, &ctrlr->batch_cache, list) {
                 for (i = 0; i < req->count; i++) {
                         rpm_msg = req->rpm_msgs + i;
@@ -314,7 +331,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
                                 break;
                 }
         }
-       spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

         return ret;
  }
@@ -386,10 +402,8 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
                 cmd += n[i];
         }

-       if (state != RPMH_ACTIVE_ONLY_STATE) {
-               cache_batch(ctrlr, req);
-               return 0;
-       }
+       if (state != RPMH_ACTIVE_ONLY_STATE)
+               return cache_batch(ctrlr, req);

         for (i = 0; i < count; i++) {
                 struct completion *compl = &compls[i];
@@ -455,9 +469,6 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
   * Return: -EBUSY if the controller is busy, probably waiting on a response
   * to a RPMH request sent earlier.
   *
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
Oh nice, I didn't even see that comment. We should probably replace
that with a comment indicating that we assume ctrlr->cache_lock is
already held.

Please also remove this comment in rpmh_flush():
         /*
          * Nobody else should be calling this function other than system PM,
          * hence we can run without locks.
          */
         list_for_each_entry(p, &ctrlr->cache, list) {

-Evan
Done, will remove in next revision.
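
For the replacement text, thinking of something along these lines above
rpmh_flush() (only a sketch, exact wording can change in the next revision):

        /*
         * Note: this expects the caller to hold ctrlr->cache_lock. When
         * PSCI OSI mode is not supported it is invoked from the cache
         * paths above; when OSI is supported it is invoked when the last
         * CPU is entering the deepest low power mode.
         */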

   */
  int rpmh_flush(struct rpmh_ctrlr *ctrlr)
  {
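
For reference, with this change a sleep set vote from a client driver (the
resource address and data value below are made up for illustration, and dev
is the client's struct device) is cached and, on targets without OSI,
flushed to the TCSes right away under cache_lock instead of waiting for the
system PM path:

        struct tcs_cmd cmd = {
                .addr = 0x30000, /* hypothetical RPMh resource address */
                .data = 0x1,     /* hypothetical vote value */
        };

        /* Dirties the cache; without OSI this also triggers rpmh_flush() */
        if (rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1))
                dev_err(dev, "failed to cache sleep vote\n");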

--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, hosted by The Linux Foundation


