TCP Westwood Patch

Hi there to the list.

I'm Luca De Cicco, a PhD student working on TCP Westwood+ at Politecnico
di Bari.

I have attached a patch (it applies to 2.6.16.18 and has also been tested
on 2.6.15) for the TCP Westwood+ congestion control algorithm. It fixes a
bug and adds some new features/improvements:

o BUGFIX: the first bandwidth sample was wrong (w->snd_una was not
  initialised with the first acked sequence number)

o RTT_min is updated each time a timeout event occurs (in order to
  cope with hard handovers in wireless scenarios); see the sketch after
  this list

o The bandwidth estimation filter is now initialized with the first
  bandwidth sample, in order to get better performance on small file
  transfers; also sketched after this list.
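
To make the second point concrete without reading the whole diff, here is a
small self-contained sketch (plain user-space C, not the kernel code) of the
RTT_min handling and of how Westwood+ turns bw_est * RTT_min into ssthresh
after a loss, as westwood_bw_rttmin() does in the kernel. The struct and
function names and the demo numbers below are made up for illustration only;
the authoritative code is in the attached patch.

#include <stdio.h>
#include <stdint.h>

/* Illustrative state only; the real fields live in struct westwood. */
struct ww_state {
	uint32_t rtt;           /* latest RTT sample, in jiffies             */
	uint32_t rtt_min;       /* minimum RTT observed since the last reset */
	uint32_t bw_est;        /* smoothed bandwidth estimate, bytes/jiffy  */
	int      reset_rtt_min; /* set on timeout/FRTO: restart the minimum  */
};

/* Called for every ack: either restart RTT_min from the current sample
 * (after a timeout, e.g. a hard handover changed the path) or keep the
 * running minimum as before. */
static void update_rtt_min(struct ww_state *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else if (w->rtt < w->rtt_min) {
		w->rtt_min = w->rtt;
	}
}

/* After a loss, Westwood+ sets ssthresh to the estimated
 * bandwidth-delay product expressed in segments (never below 2). */
static uint32_t ww_ssthresh(const struct ww_state *w, uint32_t mss)
{
	uint32_t bdp = (w->bw_est * w->rtt_min) / mss;

	return bdp > 2 ? bdp : 2;
}

int main(void)
{
	struct ww_state w = { .rtt = 0, .rtt_min = 0,
			      .bw_est = 3000, .reset_rtt_min = 1 };
	uint32_t rtts[] = { 120, 80, 200, 90 }; /* made-up RTT samples (jiffies) */
	unsigned i;

	for (i = 0; i < 4; i++) {
		w.rtt = rtts[i];
		update_rtt_min(&w);
	}
	printf("rtt_min = %u jiffies, ssthresh = %u segments\n",
	       w.rtt_min, ww_ssthresh(&w, 1460));
	return 0;
}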
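
Similarly, here is a sketch of the last item: the two-stage low-pass filter
for the bandwidth estimate (the same 7/8 form as westwood_do_filter()),
except that an empty filter is now seeded with the first sample instead of
being dragged up from zero. Again, the names and numbers are purely
illustrative.

#include <stdio.h>
#include <stdint.h>

struct bw_filter {
	uint32_t bw_ns_est; /* first-stage (noisy) bandwidth estimate */
	uint32_t bw_est;    /* smoothed bandwidth estimate            */
};

/* Same low-pass form as westwood_do_filter(): out = (7*old + new) / 8 */
static uint32_t lp_filter(uint32_t a, uint32_t b)
{
	return (7 * a + b) >> 3;
}

/* bk = bytes acked during the window, delta = window length in jiffies */
static void bw_sample(struct bw_filter *f, uint32_t bk, uint32_t delta)
{
	uint32_t sample = bk / delta;

	if (f->bw_ns_est == 0 && f->bw_est == 0) {
		/* Empty filter: seed both stages with the first sample, so a
		 * short transfer does not see an artificially low estimate. */
		f->bw_ns_est = sample;
		f->bw_est = sample;
	} else {
		f->bw_ns_est = lp_filter(f->bw_ns_est, sample);
		f->bw_est = lp_filter(f->bw_est, f->bw_ns_est);
	}
}

int main(void)
{
	struct bw_filter f = { 0, 0 };
	uint32_t bk[] = { 3000, 2800, 3200 }; /* made-up per-window byte counts */
	unsigned i;

	for (i = 0; i < 3; i++) {
		bw_sample(&f, bk[i], 50);     /* 50-jiffy measurement windows */
		printf("bw_est after sample %u: %u bytes/jiffy\n", i + 1, f.bw_est);
	}
	return 0;
}

Without the seeding, the first few estimates would stay close to zero until
enough samples had passed through the filter, which is exactly what hurt
short transfers before this change.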

Best regards,

Luca De Cicco
Politecnico di Bari
--- linux-2.6.16.18/net/ipv4/tcp_westwood.c	2006-05-22 20:04:35.000000000 +0200
+++ linux-2.6.16.18_ldc/net/ipv4/tcp_westwood.c	2006-05-24 16:22:29.000000000 +0200
@@ -1,9 +1,29 @@
 /*
- * TCP Westwood+
+ * TCP Westwood+: end-to-end bandwidth estimation for TCP
  *
- *	Angelo Dell'Aera:	TCP Westwood+ support
+ *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
+ *      Luca De Cicco: current support of Westwood+ and author of this patch
+ *                     (RTT_min update on timeout and initial bandwidth estimate)
+ *
+ * Support at http://c3lab.poliba.it/index.php/Westwood
+ * Main references in literature:
+ *
+ * - S. Mascolo, C. Casetti, M. Gerla et al.,
+ *   "TCP Westwood: bandwidth estimation for TCP", Proc. ACM Mobicom 2001
+ *
+ * - A. Grieco, S. Mascolo,
+ *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
+ *     Comm. Review, 2004
+ *
+ * - A. Dell'Aera, L. Grieco, S. Mascolo,
+ *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
+ *     A Performance Evaluation Over the Internet", ICC 2004, Paris, June 2004
+ *
+ * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
+ * ssthresh after packet loss. The probing phase is the same as in Reno.
  */
 
+
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -22,6 +42,9 @@
 	u32    accounted;
 	u32    rtt;
 	u32    rtt_min;          /* minimum observed RTT */
+	u16    first_ack;        /* flag indicating that this is the first ack */
+	u16    reset_rtt_min;    /* reset RTT min to the next RTT sample */
+
 };
 
 
@@ -49,9 +72,11 @@
         w->bw_est = 0;
         w->accounted = 0;
         w->cumul_ack = 0;
+	w->reset_rtt_min = 1;
 	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
 	w->rtt_win_sx = tcp_time_stamp;
 	w->snd_una = tcp_sk(sk)->snd_una;
+	w->first_ack = 1;
 }
 
 /*
@@ -65,8 +90,17 @@
 
 static inline void westwood_filter(struct westwood *w, u32 delta)
 {
-	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
-	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+	/*
+	 * If the filter is empty, fill it with the first bandwidth sample
+	 * (improves the estimate for short transfers)
+	 */
+	if (w->bw_ns_est == 0 && w->bw_est == 0) {
+		w->bw_ns_est = w->bk / delta;
+		w->bw_est = w->bw_ns_est;
+	} else {
+		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
+		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+	}
 }
 
 /*
@@ -101,6 +135,7 @@
 	 * right_bound = left_bound + WESTWOOD_RTT_MIN
 	 */
 	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
+	        const struct tcp_sock *tp = tcp_sk(sk);
 		westwood_filter(w, delta);
 
 		w->bk = 0;
@@ -108,6 +143,18 @@
 	}
 }
 
+static inline void update_rtt_min(struct sock *sk)
+{
+	struct westwood *w = inet_csk_ca(sk);
+	
+	if (w->reset_rtt_min) {
+		w->rtt_min = w->rtt;
+		w->reset_rtt_min = 0;
+	} else {
+		w->rtt_min = min(w->rtt, w->rtt_min);
+	}
+}
+
 /*
  * @westwood_fast_bw
  * It is called when we are in fast path. In particular it is called when
@@ -120,10 +167,10 @@
 	struct westwood *w = inet_csk_ca(sk);
 
 	westwood_update_window(sk);
-
 	w->bk += tp->snd_una - w->snd_una;
 	w->snd_una = tp->snd_una;
-	w->rtt_min = min(w->rtt, w->rtt_min);
+	
+	update_rtt_min(sk);
 }
 
 /*
@@ -135,7 +182,6 @@
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
-
 	w->cumul_ack = tp->snd_una - w->snd_una;
 
         /* If cumul_ack is 0 this is a dupack since it's not moving
@@ -158,7 +204,6 @@
 	}
 
 	w->snd_una = tp->snd_una;
-
 	return w->cumul_ack;
 }
 
@@ -185,6 +230,14 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
 
+	/* Initialise w->snd_una with the first acked sequence number in order
+	 * to fix the mismatch between tp->snd_una and w->snd_una for the first
+	 * bandwidth sample
+	 */
+	if (w->first_ack && (event == CA_EVENT_FAST_ACK || event == CA_EVENT_SLOW_ACK)) {
+		w->snd_una = tp->snd_una;
+		w->first_ack = 0;
+	}
 	switch(event) {
 	case CA_EVENT_FAST_ACK:
 		westwood_fast_bw(sk);
@@ -196,12 +249,14 @@
 
 	case CA_EVENT_FRTO:
 		tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		/* Update RTT_min when the next ack arrives */
+		w->reset_rtt_min = 1;
 		break;
 
 	case CA_EVENT_SLOW_ACK:
 		westwood_update_window(sk);
 		w->bk += westwood_acked_count(sk);
-		w->rtt_min = min(w->rtt, w->rtt_min);
+		update_rtt_min(sk);
 		break;
 
 	default:
@@ -219,14 +274,14 @@
 	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
 		struct rtattr *rta;
 		struct tcpvegas_info *info;
-
+		
 		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
 		info = RTA_DATA(rta);
 		info->tcpv_enabled = 1;
 		info->tcpv_rttcnt = 0;
 		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
 		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
-	rtattr_failure:	;
+	rtattr_failure: ;
 	}
 }
 
