From: Chris Lew <clew@xxxxxxxxxxxxxx>

In high traffic scenarios a remote may request extra intents to send
data faster. If the work thread that handles these intent requests is
starved of cpu time, then these requests can build up. Some remote
procs may not be able to handle this burst of built up intent requests.

In order to prevent intent build up, deny intent requests that can be
fulfilled by default intents that are reusable.

Signed-off-by: Chris Lew <clew@xxxxxxxxxxxxxx>
Signed-off-by: Deepak Kumar Singh <deesin@xxxxxxxxxxxxxx>
---
 drivers/rpmsg/qcom_glink_native.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 2668c66..df3c608 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -734,9 +734,11 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
 static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
 					 u32 cid, size_t size)
 {
-	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent = NULL;
+	struct glink_core_rx_intent *tmp;
 	struct glink_channel *channel;
 	unsigned long flags;
+	int iid;
 
 	spin_lock_irqsave(&glink->idr_lock, flags);
 	channel = idr_find(&glink->rcids, cid);
@@ -747,6 +749,19 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
 		return;
 	}
 
+	spin_lock_irqsave(&channel->intent_lock, flags);
+	idr_for_each_entry(&channel->liids, tmp, iid) {
+		if (tmp->size >= size && tmp->reuse) {
+			intent = tmp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&channel->intent_lock, flags);
+	if (intent) {
+		qcom_glink_send_intent_req_ack(glink, channel, !!intent);
+		return;
+	}
+
 	intent = qcom_glink_alloc_intent(glink, channel, size, false);
 	if (intent)
 		qcom_glink_advertise_intent(glink, channel, intent);
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project