A USB bulk transfer may contain many packets, so the total number of
packets in a bulk transfer may exceed the budget. Originally, only up
to budget packets were handled by napi_gro_receive(), and the remaining
packets were queued in the driver for the next schedule.

This patch breaks out of the loop that fetches the next bulk transfer
when the budget is exhausted. That is, only the current bulk transfer
is handled, and the remaining bulk transfers are kept for the next
schedule. The packets of the current bulk transfer that exceed the
budget are still queued in the driver, as in the original method.

In addition, a bulk transfer never contains more than 400 packets, so
the check of the queue length is unnecessary. Therefore, I replace it
with WARN_ON_ONCE().

Fixes: cf74eb5a5bc8 ("eth: r8152: try to use a normal budget")
Signed-off-by: Hayes Wang <hayeswang@xxxxxxxxxxx>
---
 drivers/net/usb/r8152.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0c13d9950cd8..c4038def193f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2449,7 +2449,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		}
 	}
 
-	if (list_empty(&tp->rx_done))
+	if (list_empty(&tp->rx_done) || work_done >= budget)
 		goto out1;
 
 	clear_bit(RX_EPROTO, &tp->flags);
@@ -2465,6 +2465,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		struct urb *urb;
 		u8 *rx_data;
 
+		/* A USB bulk transfer may contain many packets, so the
+		 * total number of packets may exceed the budget. Handle
+		 * all packets of the current bulk transfer, but defer
+		 * the next bulk transfer to the next schedule if the
+		 * budget is exhausted.
+		 */
+		if (work_done >= budget)
+			break;
+
 		list_del_init(cursor);
 
 		agg = list_entry(cursor, struct rx_agg, list);
@@ -2484,9 +2493,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
 			unsigned int pkt_len, rx_frag_head_sz;
 			struct sk_buff *skb;
 
-			/* limit the skb numbers for rx_queue */
-			if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
-				break;
+			WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
 
 			pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
 			if (pkt_len < ETH_ZLEN)
@@ -2564,9 +2571,10 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		}
 	}
 
+	/* Splice the remaining list back to rx_done for the next schedule */
 	if (!list_empty(&rx_queue)) {
 		spin_lock_irqsave(&tp->rx_lock, flags);
-		list_splice_tail(&rx_queue, &tp->rx_done);
+		list_splice(&rx_queue, &tp->rx_done);
 		spin_unlock_irqrestore(&tp->rx_lock, flags);
 	}
 
-- 
2.41.0
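
[Editor's illustration, not part of the patch.] The budget handling above follows
the usual NAPI convention that a poll routine stops consuming new work once it has
handled "budget" packets. Below is a minimal, self-contained userspace sketch of the
loop structure this patch introduces; the names (struct transfer, poll_once) and the
packet counts are hypothetical and are not taken from the r8152 driver.

#include <stdio.h>

#define NR_TRANSFERS	4
#define BUDGET		64

struct transfer {
	int pkts;		/* packets carried by this bulk transfer */
};

/*
 * Process pending transfers until the budget is exhausted; return the
 * number of packets handled in this poll.  The check sits at the top of
 * the loop, so the current transfer is completed while the next one is
 * deferred to the following poll.
 */
static int poll_once(struct transfer *pending, int *next, int total, int budget)
{
	int work_done = 0;

	while (*next < total) {
		if (work_done >= budget)
			break;

		/* In the real driver, packets of the current transfer that
		 * exceed the budget are parked on an skb queue for later;
		 * this model simply counts the whole transfer as handled.
		 */
		work_done += pending[*next].pkts;
		(*next)++;
	}

	return work_done;
}

int main(void)
{
	struct transfer pending[NR_TRANSFERS] = {
		{ .pkts = 40 }, { .pkts = 40 }, { .pkts = 40 }, { .pkts = 40 },
	};
	int next = 0, round = 0;

	while (next < NR_TRANSFERS) {
		int done = poll_once(pending, &next, NR_TRANSFERS, BUDGET);

		printf("poll %d: work_done=%d, transfers left=%d\n",
		       ++round, done, NR_TRANSFERS - next);
	}

	return 0;
}

With a budget of 64 and four transfers of 40 packets each, every poll handles two
transfers (overshooting the budget slightly within the current transfer) and leaves
the rest queued, mirroring the behaviour described in the commit message.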