Re: [PATCH] TCP Westwood+ (2.6.3-rc2)

On Tue, 10 Feb 2004 18:40:47 +0100
"Angelo Dell'Aera" <buffer@antifork.org> wrote:

>Dave,
>just a trivial patch fixing the westwood function prefixes
>and a few comments. It applies against kernel 2.6.3-rc2. I
>didn't compile it, but it's really too trivial! :)

ARGHH!
My apologies, really. I'll never do such a stupid thing again!
This is the correct patch (compile-tested this time). Sorry
again.

--

Angelo Dell'Aera 'buffer' 
Antifork Research, Inc.	  	http://buffer.antifork.org


--- linux-2.6.3-rc2/net/ipv4/tcp_input.c.old	2004-02-10 18:17:37.000000000 +0100
+++ linux-2.6.3-rc2/net/ipv4/tcp_input.c	2004-02-10 20:48:34.000000000 +0100
@@ -2051,17 +2051,20 @@
 	 */
 	tp->frto_counter = (tp->frto_counter + 1) % 3;
 }
+
 /*
  * TCP Westwood
  * Functions needed for estimating bandwidth.
  */
 
 /*
+ * @tcp_westwood_init
  * This function initializes fields used in TCP Westwood.
  * We can't get any information about the RTT at this time,
  * so we are forced to set it to 0.
  */
-static void init_westwood(struct sock *sk)
+
+static void tcp_westwood_init(struct sock *sk)
 {
         struct tcp_opt *tp = tcp_sk(sk);
 
@@ -2077,32 +2080,34 @@
 }
 
 /*
- * @westwood_do_filter
+ * @tcp_westwood_do_filter
  * Low-pass filter. Implemented using constant coefficients.
  */
-static inline __u32 westwood_do_filter(__u32 a, __u32 b)
+
+static inline __u32 tcp_westwood_do_filter(__u32 a, __u32 b)
 {
 	return (((7 * a) + b) >> 3);
 }
 
-static void westwood_filter(struct sock *sk, __u32 delta)
+static void tcp_westwood_filter(struct sock *sk, __u32 delta)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	__u32 sample = tp->westwood.bk / delta;
 
 	tp->westwood.bw_ns_est =
-		westwood_do_filter(tp->westwood.bw_ns_est, sample);
+		tcp_westwood_do_filter(tp->westwood.bw_ns_est, sample);
 	tp->westwood.bw_est =
-		westwood_do_filter(tp->westwood.bw_est,
+		tcp_westwood_do_filter(tp->westwood.bw_est,
 				   tp->westwood.bw_ns_est);
 	tp->westwood.bw_sample = sample;
 }
 
-/* @westwood_update_rttmin
+/* @tcp_westwood_update_rttmin
  * It is used to update RTTmin. In this case we MUST NOT use
  * the WESTWOOD_RTT_MIN minimum bound, since we could be on a LAN!
  */
-static inline __u32 westwood_update_rttmin(struct sock *sk)
+
+static inline __u32 tcp_westwood_update_rttmin(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	__u32 rttmin = tp->westwood.rtt_min;
@@ -2117,11 +2122,11 @@
 }
 
 /*
- * @westwood_acked
- * Evaluate increases for dk. It requires no lock since when it is
- * called lock should already be held. Be careful about it!
+ * @tcp_westwood_acked
+ * Evaluate increases for dk.
  */
-static inline __u32 westwood_acked(struct sock *sk)
+
+static inline __u32 tcp_westwood_acked(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 
@@ -2129,14 +2134,15 @@
 }
 
 /*
- * @westwood_new_window
+ * @tcp_westwood_new_window
  * It evaluates whether we are receiving data inside the same RTT window as
  * when we started.
  * Return value:
  * It returns 0 if we are still evaluating samples in the same RTT
  * window, 1 if the sample has to be considered in the next window.
  */
-static int westwood_new_window(struct sock *sk)
+
+static int tcp_westwood_new_window(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	__u32 left_bound;
@@ -2162,14 +2168,12 @@
 }
 
 /*
- * @westwood_update_window
+ * @tcp_westwood_update_window
  * It updates RTT evaluation window if it is the right moment to do
- * it. If so it calls filter for evaluating bandwidth. Be careful
- * about __westwood_update_window() since it is called without
- * any form of lock. It should be used only for internal purposes.
- * Call westwood_update_window() instead.
+ * it. If so, it calls the filter for evaluating bandwidth.
  */
-static void __westwood_update_window(struct sock *sk, __u32 now)
+
+static void __tcp_westwood_update_window(struct sock *sk, __u32 now)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	__u32 delta = now - tp->westwood.rtt_win_sx;
@@ -2178,34 +2182,35 @@
                 return;
 
 	if (tp->westwood.rtt)
-                westwood_filter(sk, delta);
+                tcp_westwood_filter(sk, delta);
 
         tp->westwood.bk = 0;
         tp->westwood.rtt_win_sx = tcp_time_stamp;
 }
 
 
-static void westwood_update_window(struct sock *sk, __u32 now)
+static void tcp_westwood_update_window(struct sock *sk, __u32 now)
 {
-	if (westwood_new_window(sk)) 
-		__westwood_update_window(sk, now);
+	if (tcp_westwood_new_window(sk)) 
+		__tcp_westwood_update_window(sk, now);
 }
 
 /*
- * @__westwood_fast_bw
+ * @__tcp_westwood_fast_bw
  * It is called when we are in the fast path. In particular, it is
  * called when header prediction is successful; in that case the update
  * is straightforward and doesn't need any particular care.
  */
+
 void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 
-	westwood_update_window(sk, tcp_time_stamp);
+	tcp_westwood_update_window(sk, tcp_time_stamp);
 
-	tp->westwood.bk += westwood_acked(sk);
+	tp->westwood.bk += tcp_westwood_acked(sk);
 	tp->westwood.snd_una = tp->snd_una;
-	tp->westwood.rtt_min = westwood_update_rttmin(sk);
+	tp->westwood.rtt_min = tcp_westwood_update_rttmin(sk);
 }
 
 
@@ -2214,7 +2219,7 @@
  * It updates accounted and cumul_ack when receiving a dupack.
  */
 
-static void westwood_dupack_update(struct sock *sk)
+static void tcp_westwood_dupack_update(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 
@@ -2222,46 +2227,46 @@
 	tp->westwood.cumul_ack = tp->mss_cache;
 }
 
-static inline int westwood_may_change_cumul(struct tcp_opt *tp)
+static inline int tcp_westwood_may_change_cumul(struct tcp_opt *tp)
 {
 	return ((tp->westwood.cumul_ack) > tp->mss_cache);
 }
 
-static inline void westwood_partial_update(struct tcp_opt *tp)
+static inline void tcp_westwood_partial_update(struct tcp_opt *tp)
 {
 	tp->westwood.accounted -= tp->westwood.cumul_ack;
 	tp->westwood.cumul_ack = tp->mss_cache;
 }
 
-static inline void westwood_complete_update(struct tcp_opt *tp)
+static inline void tcp_westwood_complete_update(struct tcp_opt *tp)
 {
 	tp->westwood.cumul_ack -= tp->westwood.accounted;
 	tp->westwood.accounted = 0;
 }
 
 /*
- * @westwood_acked_count
+ * @tcp_westwood_acked_count
  * This function computes cumul_ack, which is used to evaluate dk in
  * the case of delayed or partial acks.
  */
-static __u32 westwood_acked_count(struct sock *sk)
+static __u32 tcp_westwood_acked_count(struct sock *sk)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 
-	tp->westwood.cumul_ack = westwood_acked(sk);
+	tp->westwood.cumul_ack = tcp_westwood_acked(sk);
 
         /* If cumul_ack is 0, this is a dupack since it's not moving
          * tp->snd_una.
          */
         if (!(tp->westwood.cumul_ack))
-                westwood_dupack_update(sk);
+                tcp_westwood_dupack_update(sk);
 
-        if (westwood_may_change_cumul(tp)) {
+        if (tcp_westwood_may_change_cumul(tp)) {
 		/* Partial or delayed ack */
 		if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack))
-			westwood_partial_update(tp);
+			tcp_westwood_partial_update(tp);
 		else
-			westwood_complete_update(tp);
+			tcp_westwood_complete_update(tp);
 	}
 
 	tp->westwood.snd_una = tp->snd_una;
@@ -2271,19 +2276,19 @@
 
 
 /*
- * @__westwood_slow_bw
+ * @__tcp_westwood_slow_bw
  * It is called when something is going wrong... even if there could
- * be no problems! Infact a simple delayed packet may trigger a
+ * be no problems! In fact a simple delayed packet may trigger a
  * dupack. But we need to be careful in such a case.
  */
 void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 
-	westwood_update_window(sk, tcp_time_stamp);
+	tcp_westwood_update_window(sk, tcp_time_stamp);
 
-	tp->westwood.bk += westwood_acked_count(sk);
-	tp->westwood.rtt_min = westwood_update_rttmin(sk);
+	tp->westwood.bk += tcp_westwood_acked_count(sk);
+	tp->westwood.rtt_min = tcp_westwood_update_rttmin(sk);
 }
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -4124,7 +4129,7 @@
 			if(tp->af_specific->conn_request(sk, skb) < 0)
 				return 1;
 
-			init_westwood(sk);
+			tcp_westwood_init(sk);
 
 			/* Now we have several options: In theory there is 
 			 * nothing else in the frame. KA9Q has an option to 
@@ -4147,7 +4152,7 @@
 		goto discard;
 
 	case TCP_SYN_SENT:
-		init_westwood(sk);
+		tcp_westwood_init(sk);
 
 		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
 		if (queued >= 0)