/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - L. A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo.
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is as in the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimate (not yet smoothed) */
	u32    bw_est;           /* smoothed bandwidth estimate */
	u32    rtt_win_sx;       /* start of the current bandwidth-sampling window */
	u32    bk;               /* bytes acked within the current window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes acked by the latest ACK */
	u32    accounted;        /* bytes already credited via dupacks */
	u32    rtt;              /* last RTT sample, in jiffies */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag indicating that this is the first ACK */
	u8     reset_rtt_min;    /* reset rtt_min to the next RTT sample */
};


/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* deliberately conservative, see below */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about RTTmin at this
 * time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be overly conservative, since that way we can
 * be sure it will be updated in a consistent way as soon as possible.
 * That will reasonably happen within the first RTT period of the
 * connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return ((7 * a) + b) >> 3;
}
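
/* A worked example of the filter above (illustrative only, not part of
 * the algorithm): with a previous estimate a = 800 and a new sample
 * b = 1600, the result is ((7 * 800) + 1600) >> 3 = 7200 / 8 = 900,
 * i.e. an exponentially weighted moving average with gain 1/8.
 */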

static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty, fill it with the first bandwidth sample */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}
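
/* Example of a raw bandwidth sample, assuming HZ = 100 for illustration:
 * if bk = 30000 bytes were acked over a window of delta = 20 jiffies
 * (200 ms), the sample fed to the filter is 30000 / 20 = 1500 bytes per
 * jiffy, i.e. 150000 bytes/s (~1.2 Mbit/s).
 */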

/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
	struct westwood *w = inet_csk_ca(sk);

	if (cnt > 0)
		w->rtt = tcp_sk(sk)->srtt >> 3;
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if it is the right moment to do
 * so. If so, it calls the filter to evaluate the bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix the mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT window has passed.
	 * Be careful: if the RTT is less than 50ms we don't filter but
	 * keep 'building the sample'. This minimum limit was chosen
	 * because estimation over small time intervals is unreliable
	 * and better avoided.
	 * Obviously on a LAN we will reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}
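
/* Illustrative timing, assuming HZ = 100: with w->rtt = 8 jiffies (80 ms)
 * the window length is max(8, HZ/20 = 5) = 8 jiffies, so a new bandwidth
 * sample is taken only once delta exceeds 80 ms; with w->rtt = 2 jiffies
 * the 50 ms floor applies instead and the sample keeps building.
 */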

static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else
		w->rtt_min = min(w->rtt, w->rtt_min);
}


/*
 * @westwood_fast_bw
 * Called on the fast path, when header prediction is successful. In
 * that case the update is straightforward and needs no special care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Computes cumul_ack, used to update bk in the presence of
 * delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack, since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
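
/* A worked example of the accounting above, assuming mss_cache = 1460:
 * a dupack (cumul_ack == 0) credits one MSS and records it in
 * w->accounted (accounted = 1460). If the next cumulative ACK then
 * advances snd_una by 4380 bytes, the 1460 already credited is
 * subtracted, so only 4380 - 1460 = 2920 bytes are counted again.
 */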

/*
 * TCP Westwood
 * Here the limit is computed as the bandwidth estimate times RTTmin
 * (divided by mss_cache to obtain it in packets). The result is
 * clamped to a minimum of 2, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
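
/* Illustrative sizing, assuming HZ = 100: with bw_est = 1500 bytes per
 * jiffy (150000 bytes/s), rtt_min = 10 jiffies (100 ms) and
 * mss_cache = 1500, the limit is (1500 * 10) / 1500 = 10 packets,
 * i.e. the estimated bandwidth-delay product in segments.
 */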

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when the next ack arrives */
		w->reset_rtt_min = 1;
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		update_rtt_min(w);
		break;

	default:
		/* don't care */
		break;
	}
}


/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rtt = jiffies_to_usecs(ca->rtt),
			.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
		};

		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}


static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");