Re: [ANNOUNCE] TCP Westwood+

On Fri, 30 Jan 2004 15:50:35 -0800
"David S. Miller" <davem@redhat.com> wrote:

> On Fri, 30 Jan 2004 13:20:39 -0800
> Stephen Hemminger <shemminger@osdl.org> wrote:
> 
> > Right now I am testing a version that builds on 2.6, and cleans up some little things.
> 
> Ok, you want to integrate these cleanups and fixes I made to his 2.4.x
> patch.

Durrr, and here it actually is.

ChangeSet@1.1303, 2004-01-30 14:57:14-08:00, davem@nuts.davemloft.net
  [TCP]: Put tcp_ prefix on global westwood symbols.

diff -Nru a/include/net/tcp.h b/include/net/tcp.h
--- a/include/net/tcp.h	Fri Jan 30 15:44:52 2004
+++ b/include/net/tcp.h	Fri Jan 30 15:44:52 2004
@@ -1867,18 +1867,18 @@
 
 /* TCP Westwood functions and constants */
 
-#define WESTWOOD_INIT_RTT               20*HZ           /* maybe too conservative?! */
-#define WESTWOOD_RTT_MIN                HZ/20           /* 50ms */
+#define TCP_WESTWOOD_INIT_RTT               20*HZ           /* maybe too conservative?! */
+#define TCP_WESTWOOD_RTT_MIN                HZ/20           /* 50ms */
 
 
-static inline void westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
+static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
 {
         if (sysctl_tcp_westwood)
                 tp->westwood.rtt = rtt_seq;
 }
 
-void __westwood_fast_bw(struct sock *, struct sk_buff *);
-void __westwood_slow_bw(struct sock *, struct sk_buff *);
+void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
+void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
 
 /*
  * This function initializes fields used in TCP Westwood.
@@ -1886,7 +1886,7 @@
  * we are forced to set it to 0.
  */
 
-static inline void __init_westwood(struct sock *sk)
+static inline void __tcp_init_westwood(struct sock *sk)
 {
         struct tcp_opt *tp=&(sk->tp_pinfo.af_tcp);
 
@@ -1896,53 +1896,53 @@
         tp->westwood.accounted = 0;
         tp->westwood.cumul_ack = 0;
         tp->westwood.rtt_win_sx = tcp_time_stamp;
-        tp->westwood.rtt = WESTWOOD_INIT_RTT;
-        tp->westwood.rtt_min = WESTWOOD_INIT_RTT;
+        tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
+        tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
         tp->westwood.snd_una = tp->snd_una;
         tp->westwood.lock = RW_LOCK_UNLOCKED;
 }
 
-static inline void init_westwood(struct sock *sk)
+static inline void tcp_init_westwood(struct sock *sk)
 {
-	__init_westwood(sk);
+	__tcp_init_westwood(sk);
 }
 
-static inline void westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
 {
         if (sysctl_tcp_westwood)
-                __westwood_fast_bw(sk, skb);
+                __tcp_westwood_fast_bw(sk, skb);
 }
 
-static inline void westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
 {
         if (sysctl_tcp_westwood)
-                __westwood_slow_bw(sk, skb);
+                __tcp_westwood_slow_bw(sk, skb);
 }
 
-static inline __u32 __westwood_bw_rttmin(struct tcp_opt *tp)
+static inline __u32 __tcp_westwood_bw_rttmin(struct tcp_opt *tp)
 {
         return (__u32) ((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
                         (__u32) (tp->mss_cache));
 }
 
-static inline __u32 westwood_bw_rttmin(struct tcp_opt *tp)
+static inline __u32 tcp_westwood_bw_rttmin(struct tcp_opt *tp)
 {
         __u32 ret = 0;
 	
         if (sysctl_tcp_westwood)
-                ret = (__u32) (max(__westwood_bw_rttmin(tp), 2U));
+                ret = (__u32) (max(__tcp_westwood_bw_rttmin(tp), 2U));
 	
         return ret;
 }
 
-static inline int westwood_ssthresh(struct tcp_opt *tp)
+static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
 {
 	int ret = 0;
 	__u32 ssthresh;
 	
 	if (sysctl_tcp_westwood) {
 		
-		if(!(ssthresh = westwood_bw_rttmin(tp)))
+		if(!(ssthresh = tcp_westwood_bw_rttmin(tp)))
 			return ret;
 		
 		tp->snd_ssthresh = ssthresh;    
@@ -1952,14 +1952,14 @@
 	return ret;
 }
 
-static inline int westwood_cwnd(struct tcp_opt *tp)
+static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
 {
 	int ret = 0;
 	__u32 cwnd;
 	
 	if (sysctl_tcp_westwood) {
 		
-		if(!(cwnd = westwood_bw_rttmin(tp)))
+		if(!(cwnd = tcp_westwood_bw_rttmin(tp)))
 			return ret;
 		
 		tp->snd_cwnd = cwnd;
@@ -1969,13 +1969,13 @@
 	return ret;
 }
 
-static inline int westwood_complete_cwr(struct tcp_opt *tp) 
+static inline int tcp_westwood_complete_cwr(struct tcp_opt *tp) 
 {
 	int ret = 0;
 	
 	if (sysctl_tcp_westwood) {
 		
-		if (westwood_cwnd(tp)) {
+		if (tcp_westwood_cwnd(tp)) {
 			tp->snd_ssthresh = tp->snd_cwnd;
 			ret = 1;
 		}
diff -Nru a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c	Fri Jan 30 15:44:52 2004
+++ b/net/ipv4/tcp_input.c	Fri Jan 30 15:44:53 2004
@@ -474,7 +474,7 @@
 		tp->rtt_seq = tp->snd_nxt;
 	}
 
-	westwood_update_rtt(tp, tp->srtt >> 3);
+	tcp_westwood_update_rtt(tp, tp->srtt >> 3);
 }
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
@@ -1074,7 +1074,7 @@
 	    (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(tp);
 
-		if (!(westwood_ssthresh(tp)))
+		if (!(tcp_westwood_ssthresh(tp)))
 			tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
 	}
 	tp->snd_cwnd = 1;
@@ -1398,7 +1398,7 @@
 	 * could assume value 0. It should not happen but...
 	 */ 
 
-	if (!(limit = westwood_bw_rttmin(tp)))
+	if (!(limit = tcp_westwood_bw_rttmin(tp)))
 		limit = tp->snd_ssthresh/2;
 
 	tp->snd_cwnd_cnt = decr&1;
@@ -1548,7 +1548,7 @@
 
 static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
 {
-	if (!(westwood_complete_cwr(tp)))
+	if (!(tcp_westwood_complete_cwr(tp)))
 		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -2113,7 +2113,7 @@
 
 	read_lock(&tp->westwood.lock);
 	left_bound = tp->westwood.rtt_win_sx;
-	rtt = max(tp->westwood.rtt, (__u32)WESTWOOD_RTT_MIN);
+	rtt = max(tp->westwood.rtt, (__u32)TCP_WESTWOOD_RTT_MIN);
 	read_unlock(&tp->westwood.lock);
 
 	/*
@@ -2176,7 +2176,7 @@
  * straight forward and doesn't need any particular care.
  */
 
-void __westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
+void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
@@ -2277,7 +2277,7 @@
  * dupack. But we need to be careful in such case.
  */
 
-void __westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
+void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
@@ -2317,7 +2317,7 @@
 		 */
 		tcp_update_wl(tp, ack, ack_seq);
 		tp->snd_una = ack;
-		westwood_fast_bw(sk, skb);
+		tcp_westwood_fast_bw(sk, skb);
 		flag |= FLAG_WIN_UPDATE;
 
 		NET_INC_STATS_BH(TCPHPAcks);
@@ -2335,7 +2335,7 @@
 		if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
 			flag |= FLAG_ECE;
 
-		westwood_slow_bw(sk, skb);
+		tcp_westwood_slow_bw(sk, skb);
 	}
 
 	/* We passed data and got it acked, remove any soft error
@@ -4104,7 +4104,7 @@
 			if(tp->af_specific->conn_request(sk, skb) < 0)
 				return 1;
 
-			init_westwood(sk);
+			tcp_init_westwood(sk);
 
 			/* Now we have several options: In theory there is 
 			 * nothing else in the frame. KA9Q has an option to 
@@ -4128,7 +4128,7 @@
 
 	case TCP_SYN_SENT:
 
-		init_westwood(sk);
+		tcp_init_westwood(sk);
 		
 		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
 		if (queued >= 0)
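
A side note for anyone reading the patch without the rest of the tree: the
renaming above is purely mechanical, and the Westwood arithmetic behind the
helpers is unchanged. When the sysctl is enabled, cwnd/ssthresh are set to
the estimated bandwidth-delay product expressed in segments, floored at two
segments; otherwise the helpers return 0 and the normal tcp_recalc_ssthresh()
path is used instead. A minimal user-space sketch of just that computation
(illustrative only: the function and parameter names are mine, not the
kernel's, and the units are the ones the patch appears to use, i.e. bw_est
in bytes per jiffy and rtt_min in jiffies):

#include <stdint.h>

/* Westwood's window target: BWE * RTTmin converted from bytes to
 * segments, never below two segments.
 */
static uint32_t westwood_bdp_segments(uint32_t bw_est,   /* bytes per jiffy */
                                      uint32_t rtt_min,  /* jiffies */
                                      uint32_t mss)      /* bytes */
{
        uint32_t segs = (bw_est * rtt_min) / mss;

        return segs > 2 ? segs : 2;
}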


ChangeSet@1.1304, 2004-01-30 15:19:14-08:00, davem@nuts.davemloft.net
  [TCP]: Coding style fixes to westwood code.

diff -Nru a/include/net/tcp.h b/include/net/tcp.h
--- a/include/net/tcp.h	Fri Jan 30 15:44:54 2004
+++ b/include/net/tcp.h	Fri Jan 30 15:44:54 2004
@@ -1870,11 +1870,10 @@
 #define TCP_WESTWOOD_INIT_RTT               20*HZ           /* maybe too conservative?! */
 #define TCP_WESTWOOD_RTT_MIN                HZ/20           /* 50ms */
 
-
 static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
 {
-        if (sysctl_tcp_westwood)
-                tp->westwood.rtt = rtt_seq;
+	if (sysctl_tcp_westwood)
+		tp->westwood.rtt = rtt_seq;
 }
 
 void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
@@ -1888,18 +1887,18 @@
 
 static inline void __tcp_init_westwood(struct sock *sk)
 {
-        struct tcp_opt *tp=&(sk->tp_pinfo.af_tcp);
+	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
-        tp->westwood.bw_sample = 0;
-        tp->westwood.bw_ns_est = 0;
-        tp->westwood.bw_est = 0;
-        tp->westwood.accounted = 0;
-        tp->westwood.cumul_ack = 0;
-        tp->westwood.rtt_win_sx = tcp_time_stamp;
-        tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
-        tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
-        tp->westwood.snd_una = tp->snd_una;
-        tp->westwood.lock = RW_LOCK_UNLOCKED;
+	tp->westwood.bw_sample = 0;
+	tp->westwood.bw_ns_est = 0;
+	tp->westwood.bw_est = 0;
+	tp->westwood.accounted = 0;
+	tp->westwood.cumul_ack = 0;
+	tp->westwood.rtt_win_sx = tcp_time_stamp;
+	tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
+	tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
+	tp->westwood.snd_una = tp->snd_una;
+	tp->westwood.lock = RW_LOCK_UNLOCKED;
 }
 
 static inline void tcp_init_westwood(struct sock *sk)
@@ -1909,46 +1908,45 @@
 
 static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
 {
-        if (sysctl_tcp_westwood)
-                __tcp_westwood_fast_bw(sk, skb);
+	if (sysctl_tcp_westwood)
+		__tcp_westwood_fast_bw(sk, skb);
 }
 
 static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
 {
-        if (sysctl_tcp_westwood)
-                __tcp_westwood_slow_bw(sk, skb);
+	if (sysctl_tcp_westwood)
+		__tcp_westwood_slow_bw(sk, skb);
 }
 
 static inline __u32 __tcp_westwood_bw_rttmin(struct tcp_opt *tp)
 {
-        return (__u32) ((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
-                        (__u32) (tp->mss_cache));
+	return (__u32) ((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
+			(__u32) (tp->mss_cache));
 }
 
 static inline __u32 tcp_westwood_bw_rttmin(struct tcp_opt *tp)
 {
-        __u32 ret = 0;
-	
-        if (sysctl_tcp_westwood)
-                ret = (__u32) (max(__tcp_westwood_bw_rttmin(tp), 2U));
-	
-        return ret;
+	__u32 ret = 0;
+
+	if (sysctl_tcp_westwood)
+		ret = (__u32) (max(__tcp_westwood_bw_rttmin(tp), 2U));
+
+	return ret;
 }
 
 static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
 {
 	int ret = 0;
 	__u32 ssthresh;
-	
+
 	if (sysctl_tcp_westwood) {
-		
-		if(!(ssthresh = tcp_westwood_bw_rttmin(tp)))
+		if (!(ssthresh = tcp_westwood_bw_rttmin(tp)))
 			return ret;
-		
-		tp->snd_ssthresh = ssthresh;    
+
+		tp->snd_ssthresh = ssthresh;
 		ret = 1;
 	}
-	
+
 	return ret;
 }
 
@@ -1956,31 +1954,29 @@
 {
 	int ret = 0;
 	__u32 cwnd;
-	
+
 	if (sysctl_tcp_westwood) {
-		
-		if(!(cwnd = tcp_westwood_bw_rttmin(tp)))
+		if (!(cwnd = tcp_westwood_bw_rttmin(tp)))
 			return ret;
-		
+
 		tp->snd_cwnd = cwnd;
 		ret = 1;
 	}
-	
+
 	return ret;
 }
 
 static inline int tcp_westwood_complete_cwr(struct tcp_opt *tp) 
 {
 	int ret = 0;
-	
+
 	if (sysctl_tcp_westwood) {
-		
 		if (tcp_westwood_cwnd(tp)) {
 			tp->snd_ssthresh = tp->snd_cwnd;
 			ret = 1;
 		}
 	}
-	
+
 	return ret;
 }
 
diff -Nru a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c	Fri Jan 30 15:44:54 2004
+++ b/net/ipv4/tcp_input.c	Fri Jan 30 15:44:54 2004
@@ -61,7 +61,7 @@
  *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
  *					engine. Lots of bugs are found.
  *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
- *              Angelo Dell'Aera:       TCP Westwood+ support
+ *		Angelo Dell'Aera:	TCP Westwood+ support
  */
 
 #include <linux/config.h>
@@ -1391,9 +1391,9 @@
 
 	/*
 	 * TCP Westwood
-         * Here limit is evaluated as BWestimation*RTTmin (for obtaining it
+	 * Here limit is evaluated as BWestimation*RTTmin (for obtaining it
 	 * in packets we use mss_cache). If CONFIG_TCP_WESTWOOD is not defined
-	 * westwood_bw_rttmin() returns 0. In such case snd_ssthresh is still 
+	 * westwood_bw_rttmin() returns 0. In such case snd_ssthresh is still
 	 * used as usual. It prevents other strange cases in which BWE*RTTmin
 	 * could assume value 0. It should not happen but...
 	 */ 
@@ -2037,7 +2037,7 @@
 	tp->frto_counter = (tp->frto_counter + 1) % 3;
 }
 
-/* 
+/*
  * TCP Westwood
  * Functions needed for estimating bandwidth.
  */
@@ -2049,42 +2049,44 @@
 
 static inline __u32 westwood_do_filter(__u32 a, __u32 b)
 {
-	return( (7*a + b) >> 3);
-}  
+	return (((7 * a) + b) >> 3);
+}
 
 static void westwood_filter(struct sock *sk, __u32 delta)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
 	__u32 sample = (tp->westwood.bk) / delta;
 
-	tp->westwood.bw_ns_est = westwood_do_filter(tp->westwood.bw_ns_est, sample);
-	tp->westwood.bw_est = westwood_do_filter(tp->westwood.bw_est, tp->westwood.bw_ns_est);
+	tp->westwood.bw_ns_est =
+		westwood_do_filter(tp->westwood.bw_ns_est, sample);
+	tp->westwood.bw_est =
+		westwood_do_filter(tp->westwood.bw_est,
+				   tp->westwood.bw_ns_est);
 	tp->westwood.bw_sample = sample;
 }
 
 /* @westwood_update_rttmin
- * It is used to update RTTmin. In this case we MUST NOT use 
+ * It is used to update RTTmin. In this case we MUST NOT use
  * WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
  */
 
 static inline __u32 westwood_update_rttmin(struct sock *sk)
 {
-	 struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-	 __u32 rttmin = tp->westwood.rtt_min;
+	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+	__u32 rttmin = tp->westwood.rtt_min;
 
-	 if (tp->westwood.rtt == 0)
-		return(rttmin);
-	
-	 if (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)
-		rttmin = tp->westwood.rtt; 
+	if (tp->westwood.rtt == 0)
+		return rttmin;
+
+	if (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)
+		rttmin = tp->westwood.rtt;
 
-	 return(rttmin);
+	return rttmin;
 }
 
 /*
  * @westwood_acked
- * Evaluate increases for dk. It requires no lock since when it is 
+ * Evaluate increases for dk. It requires no lock since when it is
  * called lock should already be held. Be careful about it!
  */
 
@@ -2093,7 +2095,7 @@
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
 	return ((tp->snd_una) - (tp->westwood.snd_una));
-} 
+}
 
 /*
  * @westwood_new_window
@@ -2125,54 +2127,51 @@
 	 * right_bound = left_bound + WESTWOOD_RTT_MIN
          */
 
-	if ( (left_bound + rtt) < tcp_time_stamp)
+	if ((left_bound + rtt) < tcp_time_stamp)
 		ret = 1;
 
 	return ret;
 }
 
-
 /*
  * @westwood_update_window
- * It updates RTT evaluation window if it is the right moment to do 
+ * It updates RTT evaluation window if it is the right moment to do
  * it. If so it calls filter for evaluating bandwidth. Be careful
- * about __westwood_update_window() since it is called without 
+ * about __westwood_update_window() since it is called without
  * any form of lock. It should be used only for internal purposes.
  * Call westwood_update_window() instead.
  */
-
  
 static void __westwood_update_window(struct sock *sk, __u32 now)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 	__u32 delta = now - tp->westwood.rtt_win_sx;
 
-        if (!delta) 
-                return;
+	if (!delta)
+		return;
 
 	if (tp->westwood.rtt)
-                westwood_filter(sk, delta);
+		westwood_filter(sk, delta);
 
-        tp->westwood.bk = 0;
-        tp->westwood.rtt_win_sx = tcp_time_stamp;
+	tp->westwood.bk = 0;
+	tp->westwood.rtt_win_sx = tcp_time_stamp;
 }
 
-
 static void westwood_update_window(struct sock *sk, __u32 now)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
-	if(westwood_new_window(sk)) {
+	if (westwood_new_window(sk)) {
 		write_lock(&tp->westwood.lock);
 		__westwood_update_window(sk, now);
-		write_unlock(&tp->westwood.lock);		
+		write_unlock(&tp->westwood.lock);
 	}
-}	
+}
 
 /*
- * @__westwood_fast_bw 
+ * @__westwood_fast_bw
  * It is called when we are in fast path. In particular it is called when
- * header prediction is successfull. In such case infact update is 
+ * header prediction is successfull. In such case infact update is
  * straight forward and doesn't need any particular care.
  */
 
@@ -2183,7 +2182,7 @@
 	westwood_update_window(sk, tcp_time_stamp);
 
 	write_lock(&tp->westwood.lock);
-	tp->westwood.bk += westwood_acked(sk); 
+	tp->westwood.bk += westwood_acked(sk);
 	tp->westwood.snd_una = tp->snd_una;
 	tp->westwood.rtt_min = westwood_update_rttmin(sk);
 	write_unlock(&tp->westwood.lock);
@@ -2201,8 +2200,7 @@
 	return ((__u32)(tp->mss_cache));
 }
 
-
-/* 
+/*
  * @tcp_westwood_dupack_update
  * It updates accounted and cumul_ack when receiving a dupack.
  */
@@ -2217,58 +2215,53 @@
 	write_unlock(&tp->westwood.lock);
 }
 
-
 static inline int westwood_may_change_cumul(struct tcp_opt *tp)
 {
 	return ((tp->westwood.cumul_ack) > westwood_mss(tp));
 }
 
-
 static inline void westwood_partial_update(struct tcp_opt *tp)
 {
 	tp->westwood.accounted -= tp->westwood.cumul_ack;
 	tp->westwood.cumul_ack = westwood_mss(tp);
 }
 
-
 static inline void westwood_complete_update(struct tcp_opt *tp)
 {
 	tp->westwood.cumul_ack -= tp->westwood.accounted;
 	tp->westwood.accounted = 0;
 }
- 
+
 /*
  * @westwood_acked_count
  * This function evaluates cumul_ack for evaluating dk in case of
  * delayed or partial acks.
  */
- 
+
 static __u32 westwood_acked_count(struct sock *sk)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
 	tp->westwood.cumul_ack = westwood_acked(sk);
 
-        /* If cumul_ack is 0 this is a dupack since it's not moving
-         * tp->snd_una.
-         */
-
-        if (!(tp->westwood.cumul_ack))
-                westwood_dupack_update(sk);
+	/* If cumul_ack is 0 this is a dupack since it's not moving
+	 * tp->snd_una.
+	 */
+	if (!(tp->westwood.cumul_ack))
+		westwood_dupack_update(sk);
 
-        if (westwood_may_change_cumul(tp)) {
+	if (westwood_may_change_cumul(tp)) {
 		/* Partial or delayed ack */
-		if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack)) 
+		if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack))
 			westwood_partial_update(tp);
 		else 
 			westwood_complete_update(tp);
 	}
 
 	tp->westwood.snd_una = tp->snd_una;
-	
-	return(tp->westwood.cumul_ack);
-}	
 
+	return tp->westwood.cumul_ack;
+}
 
 /*
  * @__westwood_slow_bw
@@ -2283,11 +2276,11 @@
 
 	westwood_update_window(sk, tcp_time_stamp);
 
-	write_lock(&tp->westwood.lock);	
+	write_lock(&tp->westwood.lock);
 	tp->westwood.bk += westwood_acked_count(sk);
 	tp->westwood.rtt_min = westwood_update_rttmin(sk);
 	write_unlock(&tp->westwood.lock);
-}		 
+}
 
 /* TCP Westwood routines end here */
 
@@ -4127,9 +4120,8 @@
 		goto discard;
 
 	case TCP_SYN_SENT:
-
 		tcp_init_westwood(sk);
-		
+
 		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
 		if (queued >= 0)
 			return queued;
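
For what it's worth, the filter that got reformatted above is a plain
fixed-point EWMA: the new estimate is a 7:1 weighted average of the old
estimate and the new sample, and the same filter is applied twice in cascade
(raw sample -> bw_ns_est -> bw_est). A standalone sketch of that cascade;
the struct and function names here are illustrative stand-ins, not the
kernel's tcp_opt fields:

#include <stdint.h>

struct westwood_bw {
        uint32_t bw_ns_est;     /* first stage: lightly smoothed samples */
        uint32_t bw_est;        /* second stage: the estimate actually used */
};

/* (7*old + sample) >> 3: low-pass filter with gain 1/8 */
static inline uint32_t westwood_ewma(uint32_t old, uint32_t sample)
{
        return ((7 * old) + sample) >> 3;
}

/* bytes_acked were observed over delta ticks; the caller in the patch
 * guarantees delta != 0 before filtering.
 */
static void westwood_take_sample(struct westwood_bw *w,
                                 uint32_t bytes_acked, uint32_t delta)
{
        uint32_t sample = bytes_acked / delta;

        w->bw_ns_est = westwood_ewma(w->bw_ns_est, sample);
        w->bw_est    = westwood_ewma(w->bw_est, w->bw_ns_est);
}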


ChangeSet@1.1305, 2004-01-30 15:23:52-08:00, davem@nuts.davemloft.net
  [TCP]: Kill westwood specific lock, unneeded.

diff -Nru a/include/net/sock.h b/include/net/sock.h
--- a/include/net/sock.h	Fri Jan 30 15:44:56 2004
+++ b/include/net/sock.h	Fri Jan 30 15:44:56 2004
@@ -445,7 +445,6 @@
                 __u32    accounted;
                 __u32    rtt;
                 __u32    rtt_min;          /* minimum observed RTT */
-                rwlock_t lock;
         } westwood;
 };
 
diff -Nru a/include/net/tcp.h b/include/net/tcp.h
--- a/include/net/tcp.h	Fri Jan 30 15:44:56 2004
+++ b/include/net/tcp.h	Fri Jan 30 15:44:56 2004
@@ -1898,7 +1898,6 @@
 	tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
 	tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
 	tp->westwood.snd_una = tp->snd_una;
-	tp->westwood.lock = RW_LOCK_UNLOCKED;
 }
 
 static inline void tcp_init_westwood(struct sock *sk)
diff -Nru a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c	Fri Jan 30 15:44:56 2004
+++ b/net/ipv4/tcp_input.c	Fri Jan 30 15:44:56 2004
@@ -2113,10 +2113,8 @@
 	__u32 rtt;
 	int ret = 0;
 
-	read_lock(&tp->westwood.lock);
 	left_bound = tp->westwood.rtt_win_sx;
 	rtt = max(tp->westwood.rtt, (__u32)TCP_WESTWOOD_RTT_MIN);
-	read_unlock(&tp->westwood.lock);
 
 	/*
 	 * A RTT-window has passed. Be careful since if RTT is less than
@@ -2159,13 +2157,8 @@
 
 static void westwood_update_window(struct sock *sk, __u32 now)
 {
-	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
-	if (westwood_new_window(sk)) {
-		write_lock(&tp->westwood.lock);
+	if (westwood_new_window(sk))
 		__westwood_update_window(sk, now);
-		write_unlock(&tp->westwood.lock);
-	}
 }
 
 /*
@@ -2181,11 +2174,9 @@
 
 	westwood_update_window(sk, tcp_time_stamp);
 
-	write_lock(&tp->westwood.lock);
 	tp->westwood.bk += westwood_acked(sk);
 	tp->westwood.snd_una = tp->snd_una;
 	tp->westwood.rtt_min = westwood_update_rttmin(sk);
-	write_unlock(&tp->westwood.lock);
 }
 
 /*
@@ -2209,10 +2200,8 @@
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
-	write_lock(&tp->westwood.lock);
 	tp->westwood.accounted += westwood_mss(tp);
 	tp->westwood.cumul_ack = westwood_mss(tp);
-	write_unlock(&tp->westwood.lock);
 }
 
 static inline int westwood_may_change_cumul(struct tcp_opt *tp)
@@ -2276,10 +2265,8 @@
 
 	westwood_update_window(sk, tcp_time_stamp);
 
-	write_lock(&tp->westwood.lock);
 	tp->westwood.bk += westwood_acked_count(sk);
 	tp->westwood.rtt_min = westwood_update_rttmin(sk);
-	write_unlock(&tp->westwood.lock);
 }
 
 /* TCP Westwood routines end here */
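
With the per-connection rwlock gone, the sampling path above reads end to
end as: every ACK adds the newly acked bytes to westwood.bk, and once at
least one RTT (never less than TCP_WESTWOOD_RTT_MIN, i.e. 50ms) has passed
since westwood.rtt_win_sx, the accumulated bytes become a bandwidth sample
and a new window starts. A rough sketch of that control flow, assuming
jiffies-style tick timestamps; westwood_on_ack() and the struct below are
hypothetical names, not kernel code:

#include <stdint.h>

#define TICKS_PER_SEC          1000U                 /* assumed for the sketch */
#define WESTWOOD_RTT_MIN_TICKS (TICKS_PER_SEC / 20)  /* 50 ms, as in the patch */

struct westwood_win {
        uint32_t bk;            /* bytes acked in the current window */
        uint32_t rtt;           /* latest smoothed RTT, in ticks */
        uint32_t rtt_win_sx;    /* tick at which the window started */
};

/* Called for every incoming ACK; 'now' is the current tick counter. */
static void westwood_on_ack(struct westwood_win *w,
                            uint32_t bytes_acked, uint32_t now)
{
        uint32_t rtt = w->rtt > WESTWOOD_RTT_MIN_TICKS ?
                       w->rtt : WESTWOOD_RTT_MIN_TICKS;

        if (now - w->rtt_win_sx > rtt) {
                /* One RTT window has elapsed: feed w->bk / elapsed into
                 * the bandwidth filter (see the EWMA sketch earlier),
                 * then restart the window.
                 */
                w->bk = 0;
                w->rtt_win_sx = now;
        }

        w->bk += bytes_acked;
}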

