/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define U32_MAX		((u32)~0U)
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

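/*
 * Worked illustration of the fixed-point scaling above (not part of the
 * original comments; the numbers are simply the macro expansions):
 *
 *   alpha is kept in units of 1/ALPHA_SCALE (1/128), so
 *     ALPHA_MIN  = 3*128/10 = 38   ~= 0.30
 *     ALPHA_BASE = 128             =  1.0 (Reno-like increase)
 *     ALPHA_MAX  = 10*128  = 1280  = 10.0
 *   beta is kept in units of 1/BETA_SCALE (1/64), so
 *     BETA_MIN = 64/8 = 8          = 0.125
 *     BETA_MAX = 64/2 = 32         = 0.5 (Reno-like backoff)
 *   RTT_MAX = U32_MAX/1280 ~= 3,355,443 usec (~3.3 sec); RTT samples are
 *   clamped to this so products with ALPHA_MAX still fit in a u32.
 */
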
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTTs before full growth");

/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtts measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtt measurements below threshold */
};

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;

	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->acked = pkts_acked;

	/* dup ack, no rtt sample */
	if (rtt < 0)
		return;

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt > RTT_MAX)
		rtt = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt)
		ca->base_rtt = rtt;

	/* and max */
	if (ca->max_rtt < rtt)
		ca->max_rtt = rtt;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}

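/*
 * Illustrative example (made-up numbers, not from the original source):
 * with base_rtt = 20000 usec and max_rtt = 120000 usec, max_delay() is
 * 100000 usec.  If the last window collected cnt_rtt = 50 samples with
 * sum_rtt = 2,500,000 usec, the mean RTT is 50000 usec, so avg_delay()
 * returns 50000 - 20000 = 30000 usec.
 */
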
/*
 * Compute value of alpha used for additive increase.
 * If small window then use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested)
 *    then use small alpha (0.3)
 *
 * The result is a convex window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;

		/* Wait for theta good RTTs (default 5) before allowing alpha
		 * to go back to alpha max.  This prevents one good RTT from
		 * causing a sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ----------------  - d1
	 *        amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */

	dm -= d1;
	da -= d1;
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}

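/*
 * Worked example of the alpha() curve above (illustrative numbers only,
 * not from the original source).  Continuing the earlier example with
 * dm = 100000 usec and da = 30000 usec:
 *
 *   d1 = dm/100 = 1000, so da > d1 and the curve is used.
 *   After subtracting d1: dm = 99000, da = 29000.
 *
 *   alpha = (99000 * 1280) /
 *           (99000 + 29000 * (1280 - 38) / 38)
 *         = 126720000 / (99000 + 947842)
 *         = 121			-> ~0.95 in 1/128 units
 *
 * Sanity check of the endpoints: da just above d1 gives alpha close to
 * ALPHA_MAX (1280 = 10.0), while da approaching dm gives alpha close to
 * ALPHA_MIN (38 ~= 0.3).
 */
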
/*
 * Beta used for multiplicative decrease.
 * For small window sizes returns same value as Reno (0.5)
 *
 * If delay is small (<= 10% of max) then beta = 1/8
 * If delay is 80% of max or more then beta = 1/2
 * In between beta is a linear function of delay.
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *         d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}

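/*
 * Worked example of beta() (illustrative numbers only).  With
 * dm = 100000 usec: d2 = 10000, d3 = 80000.  An average delay of
 * da = 45000 usec (the midpoint of [d2, d3]) gives
 *
 *   beta = (8*80000 - 32*10000 + 24*45000) / 70000
 *        = 1400000 / 70000
 *        = 20			-> 20/64 = 0.3125
 *
 * i.e. exactly midway between BETA_MIN (0.125) and BETA_MAX (0.5), as
 * expected for a linear interpolation.
 */
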
/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tp->snd_cwnd < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC 2861: only increase cwnd if the window is fully utilized */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In slow start */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is a close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32) tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}

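/*
 * Worked example of the increase above (illustrative, not from the
 * original source).  With snd_cwnd = 20:
 *
 *   alpha = ALPHA_BASE (128, i.e. 1.0): delta = snd_cwnd_cnt, so a full
 *   window of 20 acked packets is needed before delta >= snd_cwnd and
 *   cwnd grows by delta/snd_cwnd = 1, matching Reno.
 *
 *   alpha = ALPHA_MAX (1280, i.e. 10.0): delta = snd_cwnd_cnt * 10, so
 *   only 2 acked packets are needed per increment and cwnd grows by
 *   roughly 10 segments per RTT, i.e. about alpha segments per RTT.
 */
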
static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	/* Multiplicative decrease */
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}

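/*
 * Worked example (illustrative): with snd_cwnd = 100, beta = BETA_MAX
 * (32/64 = 0.5) gives ssthresh = 100 - 50 = 50, the usual Reno halving,
 * while beta = BETA_MIN (8/64 = 0.125) gives 100 - 12 = 88, a much
 * gentler backoff when queueing delay was low at the time of loss.
 */
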

/* Extract info for TCP socket info provided via netlink. */
static void tcp_illinois_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rttcnt = ca->cnt_rtt,
			.tcpv_minrtt = ca->base_rtt,
		};

		if (info.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info.tcpv_rttcnt);
			info.tcpv_rtt = t;
		}
		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}

static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.flags		= TCP_CONG_RTT_STAMP,
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");