intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -353,11 +358,19 @@ void intel_gt_runtime_suspend(struct intel_gt *gt)
int intel_gt_runtime_resume(struct intel_gt *gt)
{
+ int ret;
+
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
intel_ggtt_restore_fences(gt->ggtt);
- return intel_uc_runtime_resume(&gt->uc);
+ ret = intel_uc_runtime_resume(&gt->uc);
+ if (ret)
+ return ret;
+
+ intel_pxp_resume(&gt->pxp);
+
+ return 0;
}
static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 59fb4c710c8c..d5bcc70a22d4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,6 +67,8 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "pxp/intel_pxp_pm.h"
+
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 340f20d130a8..9e5847c653f2 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -9,6 +9,7 @@
#include "gt/intel_gt_irq.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "intel_runtime_pm.h"
/**
* intel_pxp_irq_handler - Handles PXP interrupts.
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
new file mode 100644
index 000000000000..23fd86de5a24
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_pm.h"
+#include "intel_pxp_session.h"
+
+void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ /*
+ * Contexts using protected objects keep a runtime PM reference, so we
+ * can only runtime suspend when all of them have been either closed
+ * or banned. Therefore, there is no need to invalidate in that
+ * scenario.
+ */
+ if (!runtime)
+ intel_pxp_invalidate(pxp);
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
+
+void intel_pxp_resume(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ /*
+ * The PXP component gets automatically unbound when we go into S3 and
+ * re-bound after we come out, so in that scenario we can defer the
+ * hw init to the bind call.
+ */
+ if (!pxp->pxp_component)
+ return;
+
+ intel_pxp_init_hw(pxp);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
new file mode 100644
index 000000000000..e6a357996e19
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_PM_H__
+#define __INTEL_PXP_PM_H__
+
+#include "i915_drv.h"
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
+void intel_pxp_resume(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+{
+}
+static inline void intel_pxp_resume(struct intel_pxp *pxp)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index a95cc443a48d..d02732f04757 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -21,29 +21,36 @@
static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
intel_wakeref_t wakeref;
u32 sip = 0;
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- sip = intel_uncore_read(gt->uncore, GEN12_KCR_SIP);
+ /* if we're suspended the session is considered off */
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
+ sip = intel_uncore_read(uncore, GEN12_KCR_SIP);
return sip & BIT(id);
}
static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
intel_wakeref_t wakeref;
u32 mask = BIT(id);
int ret;
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- ret = intel_wait_for_register(gt->uncore,
- GEN12_KCR_SIP,
- mask,
- in_play ? mask : 0,
- 100);
+ /* if we're suspended the session is considered off */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref)
+ return in_play ? -ENODEV : 0;
+
+ ret = intel_wait_for_register(uncore,
+ GEN12_KCR_SIP,
+ mask,
+ in_play ? mask : 0,
+ 100);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return ret;
}
@@ -135,6 +142,7 @@ void intel_pxp_session_work(struct work_struct *work)
{
struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
struct intel_gt *gt = pxp_to_gt(pxp);
+ intel_wakeref_t wakeref;
u32 events = 0;
spin_lock_irq(&gt->irq_lock);
@@ -147,6 +155,14 @@ void intel_pxp_session_work(struct work_struct *work)
if (events & PXP_INVAL_REQUIRED)
intel_pxp_invalidate(pxp);
+ /*
+ * If we're processing an event while suspending then don't bother,
+ * we're going to re-init everything on resume anyway.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
+ if (!wakeref)
+ return;
+
if (events & PXP_TERMINATION_REQUEST) {
events &= ~PXP_TERMINATION_COMPLETE;
pxp_terminate(pxp);
@@ -154,4 +170,6 @@ void intel_pxp_session_work(struct work_struct *work)
if (events & PXP_TERMINATION_COMPLETE)
pxp_terminate_complete(pxp);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 3fc3ddfd02b3..49508f31dcb7 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -78,16 +78,25 @@ static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
static int i915_pxp_tee_component_bind(struct device *i915_kdev,
struct device *tee_kdev, void *data)
{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = data;
pxp->pxp_component->tee_dev = tee_kdev;
mutex_unlock(&pxp->tee_mutex);
+ /* if we are suspended, the HW will be re-initialized on resume */
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
+ if (!wakeref)
+ return 0;
+
/* the component is required to fully start the PXP HW */
intel_pxp_init_hw(pxp);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
return 0;
}
--
2.25.1
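
For reference, a minimal sketch (not part of the patch) of how the system suspend/resume paths in i915_drv.c are expected to call the new hooks; the exact call sites and the dev_priv->gt.pxp naming are assumptions inferred from the pxp/intel_pxp_pm.h include added above:

	/* full (non-runtime) suspend path: invalidate any active sessions */
	intel_pxp_suspend(&dev_priv->gt.pxp, false);

	/* resume path: re-init the HW now, or defer to the component re-bind after S3 */
	intel_pxp_resume(&dev_priv->gt.pxp);

The runtime PM side is handled in intel_gt_runtime_suspend/resume (shown above), which pass runtime=true so active sessions are not invalidated, since protected contexts hold a runtime PM reference.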