On Mon, Oct 08, 2018 at 09:27:24PM -0700, Jeykumar Sankaran wrote:
DPU maintained reservation lists to cache the HW blocks assigned
to a display, along with a retrieval mechanism for the individual
DRM components to query their respective HW blocks.

This patch uses the sub-classed CRTC state to store and track the
HW blocks assigned to the different components of the display
pipeline. It helps the driver:
- to get rid of unwanted store and retrieval RM APIs
- to preserve HW resources assigned in atomic_check through
  atomic swap/duplicate.

A separate patch is submitted to remove resource reservation
from the atomic_commit path.
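
Side note: the swap/duplicate preservation works because the existing
dpu_crtc_duplicate_state() kmemdups the whole sub-classed state, so the
new arrays simply ride along on every duplicate. Roughly this, if I'm
remembering the current code right (sketch only, not part of the patch):

static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate;

	old_cstate = to_dpu_crtc_state(crtc->state);
	/* whole-struct copy: mixers[]/hw_ctls[]/hw_intfs[] come with it */
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}
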
Signed-off-by: Jeykumar Sankaran <jsanka@xxxxxxxxxxxxxx>
---
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c            | 65 +++-------------------
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h            | 14 +++++
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c         | 28 +++-------
.../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c    | 20 ++-----
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c              | 58 ++++++++++++-------
5 files changed, 72 insertions(+), 113 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4960641..0625f56 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -421,69 +421,20 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
trace_dpu_crtc_complete_commit(DRMID(crtc));
}
-static void _dpu_crtc_setup_mixer_for_encoder(
- struct drm_crtc *crtc,
- struct drm_encoder *enc)
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
- struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
- struct dpu_rm *rm = &dpu_kms->rm;
struct dpu_crtc_mixer *mixer;
- struct dpu_hw_ctl *last_valid_ctl = NULL;
- int i;
- struct dpu_rm_hw_iter lm_iter, ctl_iter;
-
- dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
- dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+ int i, ctl_index;
/* Set up all the mixers and ctls reserved by this encoder */
- for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
+ for (i = 0; i < cstate->num_mixers; i++) {
mixer = &cstate->mixers[i];
- if (!dpu_rm_get_hw(rm, &lm_iter))
- break;
- mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
- if (!dpu_rm_get_hw(rm, &ctl_iter)) {
- DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
- mixer->hw_lm->idx - LM_0);
- mixer->lm_ctl = last_valid_ctl;
- } else {
- mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->lm_ctl;
- }
-
- /* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->lm_ctl) {
- DPU_ERROR("no valid ctls found for lm %d\n",
- mixer->hw_lm->idx - LM_0);
- return;
- }
-
- cstate->num_mixers++;
- DPU_DEBUG("setup mixer %d: lm %d\n",
- i, mixer->hw_lm->idx - LM_0);
- DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->lm_ctl->idx - CTL_0);
- }
-}
-
-static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct drm_encoder *enc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- /* Check for mixers on all encoders attached to this crtc */
- list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+ ctl_index = min(i, cstate->num_ctls - 1);
This is another one of those places I mentioned where we're just assuming
a value is going to be in a certain range. If num_ctls/num_intfs/num_phys_encs
(all the same value afaict) is 0, we end up in a bad place.
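
Something along these lines (just a sketch, and assuming bailing out of
mixer setup is an acceptable failure mode here) would at least keep us
from indexing hw_ctls[] with a bogus value:

	/* hypothetical guard, not in this patch */
	if (!cstate->num_ctls || !cstate->num_mixers) {
		DPU_ERROR("crtc%d: no ctl/lm reserved in state\n",
			  crtc->base.id);
		return;
	}

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer = &cstate->mixers[i];
		/* num_ctls >= 1 is guaranteed by the check above */
		ctl_index = min(i, (int)cstate->num_ctls - 1);
		mixer->lm_ctl = cstate->hw_ctls[ctl_index];
	}
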
+ mixer->lm_ctl = cstate->hw_ctls[ctl_index];
}
-
- mutex_unlock(&dpu_crtc->crtc_lock);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
@@ -536,10 +487,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!cstate->num_mixers) {
- _dpu_crtc_setup_mixers(crtc);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- }
+ _dpu_crtc_setup_mixers(crtc);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 75fdd3c..17aaad7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -84,12 +84,14 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
+ * @hw_pp: Pingpong HW driver context
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl;
+ struct dpu_hw_pingpong *hw_pp;
u32 mixer_op_mode;
u32 flush_mask;
};
@@ -230,6 +232,18 @@ struct dpu_crtc_state {
u32 num_ctls;
struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+
+ /**
+ * as drm encoders don't have dedicated state objects
+ * and drm connectors are not owned by DPU, maintain
+ * HW interface and other interface related blocks
+ * in crtc state
Can you use private state instead?
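
i.e. hang these off a drm_private_obj so the atomic core duplicates and
swaps them for free, instead of growing (and being limited by) the CRTC
state. Rough sketch of what I mean, all names hypothetical:

struct dpu_intf_state {
	struct drm_private_state base;
	u32 num_intfs;
	struct dpu_hw_intf *hw_intfs[CRTC_DUAL_MIXERS];
};

#define to_dpu_intf_state(x) container_of(x, struct dpu_intf_state, base)

static struct drm_private_state *
dpu_intf_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_intf_state *state;

	/* copy the whole sub-classed state, then fix up the base */
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_intf_destroy_state(struct drm_private_obj *obj,
				   struct drm_private_state *state)
{
	kfree(to_dpu_intf_state(state));
}

static const struct drm_private_state_funcs dpu_intf_state_funcs = {
	.atomic_duplicate_state = dpu_intf_duplicate_state,
	.atomic_destroy_state = dpu_intf_destroy_state,
};

The obj would be registered once with drm_atomic_private_obj_init() and
looked up in atomic_check with drm_atomic_get_private_obj_state(), so the
encoder/interface bookkeeping wouldn't need to live in the CRTC state at
all.
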
+ *
+ * TODO: No support for clone mode yet where a crtc
+ * can be attached with more than one encoder/connector.
+ */
+ u32 num_intfs;
+ struct dpu_hw_intf *hw_intfs[CRTC_DUAL_MIXERS];
};
#define to_dpu_crtc_state(x) \
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index d12f896..17dbbc3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1001,9 +1001,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter, ctl_iter;
+ struct dpu_crtc_state *dpu_cstate;
struct msm_display_topology topology;
- struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
int i = 0, ret;
if (!drm_enc) {
@@ -1043,27 +1042,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
- dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
- break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
- }
-
- dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
- break;
- hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
- }
+ dpu_cstate = to_dpu_crtc_state(drm_enc->crtc->state);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
int ctl_index;
if (phys) {
- if (!dpu_enc->hw_pp[i]) {
+ if (!dpu_cstate->mixers[i].hw_pp) {
DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
"at idx: %d\n", i);
return;
@@ -1071,14 +1057,16 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
ctl_index = phys->split_role == ENC_ROLE_SLAVE ? 1 : 0;
- if (!hw_ctl[ctl_index]) {
+ if (!dpu_cstate->hw_ctls[ctl_index]) {
DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
"at idx: %d\n", ctl_index);
return;
}
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[ctl_index];
+ dpu_enc->hw_pp[i] = dpu_cstate->mixers[i].hw_pp;
+
+ phys->hw_pp = dpu_cstate->mixers[i].hw_pp;
+ phys->hw_ctl = dpu_cstate->hw_ctls[ctl_index];
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 84de385..4563f8d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -461,28 +461,20 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
- DPU_ERROR("invalid encoder/device\n");
- return;
- }
- priv = phys_enc->parent->dev->dev_private;
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == phys_enc->intf_idx) {
- vid_enc->hw_intf = hw_intf;
+ for (i = 0; i < dpu_cstate->num_intfs; i++) {
+ if (dpu_cstate->hw_intfs[i]->idx == phys_enc->intf_idx) {
+ vid_enc->hw_intf = dpu_cstate->hw_intfs[i];
break;
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 377def7..5703b11 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -388,6 +388,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
}
static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+ struct dpu_crtc_state *dpu_cstate,
struct dpu_rm_requirements *reqs)
{
@@ -449,16 +450,22 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
lm[i]->enc_id = enc_id;
pp[i]->enc_id = enc_id;
+ dpu_cstate->mixers[i].hw_lm = to_dpu_hw_mixer(lm[i]->hw);
+ dpu_cstate->mixers[i].hw_pp = to_dpu_hw_pingpong(pp[i]->hw);
+
trace_dpu_rm_reserve_lms(lm[i]->id, DPU_HW_BLK_LM, enc_id,
pp[i]->id);
}
+ dpu_cstate->num_mixers = lm_count;
+
return rc;
}
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
uint32_t enc_id,
+ struct dpu_crtc_state *dpu_cstate,
const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
@@ -501,21 +508,24 @@ static int _dpu_rm_reserve_ctls(
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->enc_id = enc_id;
+ dpu_cstate->hw_ctls[i] = to_dpu_hw_ctl(ctls[i]->hw);
+
trace_dpu_rm_reserve_ctls(ctls[i]->id, DPU_HW_BLK_CTL,
enc_id);
}
+ dpu_cstate->num_ctls = num_ctls;
+
return 0;
}
-static int _dpu_rm_reserve_intf(
+static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
struct dpu_rm *rm,
uint32_t enc_id,
uint32_t id,
enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
- int ret = 0;
/* Find the block entry in the rm, and note the reservation */
dpu_rm_init_hw_iter(&iter, 0, type);
@@ -525,7 +535,7 @@ static int _dpu_rm_reserve_intf(
if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
DPU_ERROR("type %d id %d already reserved\n",
type, id);
- return -ENAVAIL;
+ return NULL;
}
iter.blk->enc_id = enc_id;
@@ -535,56 +545,63 @@ static int _dpu_rm_reserve_intf(
}
/* Shouldn't happen since intfs are fixed at probe */
- if (!iter.hw) {
+ if (!iter.blk) {
DPU_ERROR("couldn't find type %d id %d\n", type, id);
- return -EINVAL;
+ return NULL;
}
- return ret;
+ return iter.blk;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
uint32_t enc_id,
+ struct dpu_crtc_state *dpu_cstate,
struct dpu_encoder_hw_resources *hw_res)
{
- int i, ret = 0;
- u32 id;
+ struct dpu_rm_hw_blk *blk;
+ int i, num_intfs = 0;
for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
- id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, enc_id, id,
+
+ blk = _dpu_rm_reserve_intf(rm, enc_id, i + INTF_0,
DPU_HW_BLK_INTF);
- if (ret)
- return ret;
+ if (!blk)
+ return -ENAVAIL;
+
+ dpu_cstate->hw_intfs[num_intfs++] = to_dpu_hw_intf(blk->hw);
}
- return ret;
+ dpu_cstate->num_intfs = num_intfs;
+
+ return 0;
}
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct dpu_crtc_state *dpu_cstate,
struct dpu_rm_requirements *reqs)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, enc->base.id, dpu_cstate, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, enc->base.id, dpu_cstate,
+ &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, dpu_cstate,
+ &reqs->hw_res);
if (ret)
return ret;
@@ -594,7 +611,6 @@ static int _dpu_rm_make_reservation(
static int _dpu_rm_populate_requirements(
struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
@@ -642,6 +658,7 @@ int dpu_rm_reserve(
bool test_only)
{
struct dpu_rm_requirements reqs;
+ struct dpu_crtc_state *dpu_cstate = to_dpu_crtc_state(crtc_state);
int ret;
/* Check if this is just a page-flip */
@@ -653,14 +670,13 @@ int dpu_rm_reserve(
mutex_lock(&rm->rm_lock);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
- topology);
+ ret = _dpu_rm_populate_requirements(rm, enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
goto end;
}
- ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
+ ret = _dpu_rm_make_reservation(rm, enc, dpu_cstate, &reqs);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
_dpu_rm_release_reservation(rm, enc->base.id);
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project