Re: [PATCH] TCP Westwood+

On Mon, 15 Mar 2004 13:00:43 -0800
"David S. Miller" <davem@redhat.com> wrote:

>On Mon, 15 Mar 2004 16:42:20 +0100
>"Angelo Dell'Aera" <buffer@antifork.org> wrote:
>
>> this is the 2.4 version of Stephen's patch which removes
>> bw_sample. I also made a few small changes to comments that
>> were simply too old and no longer reflected the current
>> implementation in any way. It applies against 2.4.26-pre3.
>
>Applied, can you make the comment fixups (on top of Stephen's
>patch) for 2.6.x too?  We should keep these two things in sync
>as far as is reasonable.

Here is what you asked for. It applies against 2.6.4 with
Stephen's patch already applied.

Regards.

--

Angelo Dell'Aera 'buffer' 
Antifork Research, Inc.	  	http://buffer.antifork.org


diff -Naur linux-2.6.4-old/net/ipv4/tcp_input.c linux-2.6.4/net/ipv4/tcp_input.c
--- linux-2.6.4-old/net/ipv4/tcp_input.c	2004-03-15 23:30:13.000000000 +0100
+++ linux-2.6.4/net/ipv4/tcp_input.c	2004-03-15 23:36:44.000000000 +0100
@@ -2051,16 +2051,21 @@
 	 */
 	tp->frto_counter = (tp->frto_counter + 1) % 3;
 }
+
 /*
- * TCP Westwood
- * Functions needed for estimating bandwidth.
+ * TCP Westwood+
  */
 
 /*
- * This function initializes fields used in TCP Westwood.
- * We can't get no information about RTT at this time so
- * we are forced to set it to 0.
+ * @init_westwood
+ * This function initializes fields used in TCP Westwood+. We cannot
+ * get any information about RTTmin at this time, so we simply set it
+ * to TCP_WESTWOOD_INIT_RTT. This value is deliberately conservative,
+ * so that it is sure to be updated consistently as soon as possible,
+ * which should reasonably happen within the first RTT of the
+ * connection lifetime.
  */
+
 static void init_westwood(struct sock *sk)
 {
         struct tcp_opt *tp = tcp_sk(sk);
@@ -2079,6 +2084,7 @@
  * @westwood_do_filter
  * Low-pass filter. Implemented using constant coeffients.
  */
+
 static inline __u32 westwood_do_filter(__u32 a, __u32 b)
 {
 	return (((7 * a) + b) >> 3);
@@ -2096,10 +2102,12 @@
 				   tp->westwood.bw_ns_est);
 }
 
-/* @westwood_update_rttmin
+/* 
+ * @westwood_update_rttmin
  * It is used to update RTTmin. In this case we MUST NOT use
  * WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
  */
+
 static inline __u32 westwood_update_rttmin(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
@@ -2116,9 +2124,9 @@
 
 /*
  * @westwood_acked
- * Evaluate increases for dk. It requires no lock since when it is
- * called lock should already be held. Be careful about it!
+ * Evaluate increases for dk. 
  */
+
 static inline __u32 westwood_acked(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
@@ -2134,6 +2142,7 @@
  * It returns 0 if we are still evaluating samples in the same RTT
  * window, 1 if the sample has to be considered in the next window.
  */
+
 static int westwood_new_window(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
@@ -2162,11 +2171,9 @@
 /*
  * @westwood_update_window
  * It updates RTT evaluation window if it is the right moment to do
- * it. If so it calls filter for evaluating bandwidth. Be careful
- * about __westwood_update_window() since it is called without
- * any form of lock. It should be used only for internal purposes.
- * Call westwood_update_window() instead.
+ * it. If so, it calls the filter to evaluate the bandwidth.
  */
+
 static void __westwood_update_window(struct sock *sk, __u32 now)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
@@ -2190,11 +2197,12 @@
 }
 
 /*
- * @__westwood_fast_bw
+ * @__tcp_westwood_fast_bw
  * It is called when we are in fast path. In particular it is called when
  * header prediction is successfull. In such case infact update is
  * straight forward and doesn't need any particular care.
  */
+
 void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
@@ -2208,7 +2216,7 @@
 
 
 /*
- * @tcp_westwood_dupack_update
+ * @westwood_dupack_update
  * It updates accounted and cumul_ack when receiving a dupack.
  */
 
@@ -2242,6 +2250,7 @@
  * This function evaluates cumul_ack for evaluating dk in case of
  * delayed or partial acks.
  */
+
 static __u32 westwood_acked_count(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
@@ -2269,11 +2278,12 @@
 
 
 /*
- * @__westwood_slow_bw
+ * @__tcp_westwood_slow_bw
  * It is called when something is going wrong..even if there could
  * be no problems! Infact a simple delayed packet may trigger a
  * dupack. But we need to be careful in such case.
  */
+
 void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
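
For anyone reading the comments above without the rest of
tcp_input.c at hand: once per RTT the estimator turns the bytes
acked during the evaluation window into a bandwidth sample and
smooths it with the low-pass filter. A rough, self-contained
sketch of that step follows; it is not part of the patch, and the
names are illustrative only, not the real tcp_opt fields.

/*
 * Illustrative sketch of the per-RTT bandwidth sampling; the
 * struct and function names here are made up for clarity.
 */
struct ws_sketch {
	unsigned int bk;		/* bytes acked in the current window  */
	unsigned int rtt_win_sx;	/* start time of the current window   */
	unsigned int rtt;		/* current RTT estimate               */
	unsigned int bw_ns_est;		/* first-stage (unsmoothed) estimate  */
	unsigned int bw_est;		/* final smoothed bandwidth estimate  */
};

/* Same form as westwood_do_filter(): new = 7/8 old + 1/8 sample. */
static inline unsigned int ws_filter(unsigned int a, unsigned int b)
{
	return ((7 * a) + b) >> 3;
}

static void ws_update_window(struct ws_sketch *w, unsigned int now)
{
	unsigned int delta = now - w->rtt_win_sx;

	/* Take a new sample only when at least one RTT has elapsed. */
	if (w->rtt && delta > w->rtt) {
		unsigned int sample = w->bk / delta;	/* bytes per tick */

		/* Two cascaded low-pass stages smooth out bursty ACK
		 * arrivals (e.g. ACK compression). */
		w->bw_ns_est = ws_filter(w->bw_ns_est, sample);
		w->bw_est    = ws_filter(w->bw_est, w->bw_ns_est);

		/* Open the next evaluation window. */
		w->bk = 0;
		w->rtt_win_sx = now;
	}
}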

