Re: [PATCH v5 net-next 01/13] tcp: reorganize tcp_in_ack_event() and tcp_count_delivered()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Tue, Nov 5, 2024 at 11:07 AM <chia-yu.chang@xxxxxxxxxxxxxxxxxxx> wrote:
>
> From: Ilpo Järvinen <ij@xxxxxxxxxx>
>
> - Move tcp_count_delivered() earlier and split tcp_count_delivered_ce()
>   out of it
> - Move tcp_in_ack_event() later
> - While at it, remove the inline from tcp_in_ack_event() and let
>   the compiler decide
>
> Accurate ECN's heuristics do not know if there is going
> to be ACE field based CE counter increase or not until after
> rtx queue has been processed. Only then the number of ACKed
> bytes/pkts is available. As CE or not affects presence of
> FLAG_ECE, that information for tcp_in_ack_event is not yet
> available in the old location of the call to tcp_in_ack_event().
>
> Signed-off-by: Ilpo Järvinen <ij@xxxxxxxxxx>
> Signed-off-by: Chia-Yu Chang <chia-yu.chang@xxxxxxxxxxxxxxxxxxx>
> ---
>  net/ipv4/tcp_input.c | 56 +++++++++++++++++++++++++-------------------
>  1 file changed, 32 insertions(+), 24 deletions(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index 5bdf13ac26ef..fc52eab4fcc9 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -413,6 +413,20 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
>         return false;
>  }
>
> +static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
> +{
> +       tp->delivered_ce += ecn_count;
> +}
> +
> +/* Updates the delivered and delivered_ce counts */
> +static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
> +                               bool ece_ack)
> +{
> +       tp->delivered += delivered;
> +       if (ece_ack)
> +               tcp_count_delivered_ce(tp, delivered);
> +}
> +
>  /* Buffer size and advertised window tuning.
>   *
>   * 1. Tuning sk->sk_sndbuf, when connection enters established state.
> @@ -1148,15 +1162,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
>         }
>  }
>
> -/* Updates the delivered and delivered_ce counts */
> -static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
> -                               bool ece_ack)
> -{
> -       tp->delivered += delivered;
> -       if (ece_ack)
> -               tp->delivered_ce += delivered;
> -}
> -
>  /* This procedure tags the retransmission queue when SACKs arrive.
>   *
>   * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
> @@ -3856,12 +3861,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
>         }
>  }
>
> -static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
> +static void tcp_in_ack_event(struct sock *sk, int flag)
>  {
>         const struct inet_connection_sock *icsk = inet_csk(sk);
>
> -       if (icsk->icsk_ca_ops->in_ack_event)
> -               icsk->icsk_ca_ops->in_ack_event(sk, flags);
> +       if (icsk->icsk_ca_ops->in_ack_event) {
> +               u32 ack_ev_flags = 0;
> +
> +               if (flag & FLAG_WIN_UPDATE)
> +                       ack_ev_flags |= CA_ACK_WIN_UPDATE;
> +               if (flag & FLAG_SLOWPATH) {
> +                       ack_ev_flags = CA_ACK_SLOWPATH;

This removes the potential CA_ACK_WIN_UPDATE flag; I would suggest:

ack_ev_flags |= CA_ACK_SLOWPATH;


> +                       if (flag & FLAG_ECE)
> +                               ack_ev_flags |= CA_ACK_ECE;
> +               }
> +
> +               icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags);
> +       }
>  }
>
>





[Index of Archives]     [Netfilter Users]     [Berkeley Packet Filter]     [LARTC]     [Bugtraq]     [Yosemite Forum]

  Powered by Linux