/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		pr_notice("%s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_info("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
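
/*
 * Usage sketch (illustrative, not part of this file): a congestion
 * control module fills in a tcp_congestion_ops and registers it from
 * its module init hook; the "foo" name and callbacks are hypothetical.
 *
 *	static struct tcp_congestion_ops tcp_foo __read_mostly = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= foo_ssthresh,
 *		.cong_avoid	= foo_cong_avoid,
 *	};
 *
 *	static int __init tcp_foo_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_foo);
 *	}
 *	module_init(tcp_foo_init);
 *
 *	static void __exit tcp_foo_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_foo);
 *	}
 *	module_exit(tcp_foo_exit);
 */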

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
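
/*
 * The default is exposed via sysctl; e.g. from a shell (assuming the
 * cubic module is available):
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=cubic
 *
 * list_move() above keeps the default at the head of tcp_cong_list,
 * which is where tcp_init_congestion_control() begins its search.
 */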

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);


/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build string with list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
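
/*
 * The non-restricted set is likewise a sysctl; e.g. to limit
 * unprivileged sockets to reno and cubic (illustrative):
 *
 *	sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 */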


/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change if asking for the current value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found, attempt to autoload the module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}
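
/*
 * tcp_set_congestion_control() is reached via the TCP_CONGESTION socket
 * option; e.g. from userspace (illustrative):
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno", strlen("reno"));
 *
 * Callers without CAP_NET_ADMIN may only select algorithms flagged
 * TCP_CONG_NON_RESTRICTED (see the -EPERM case above).
 */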

/* RFC2861: Check whether we are limited by application or congestion window.
 * This is the inverse of the cwnd check in tcp_tso_should_defer().
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size)
		return 1;
	return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
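
/*
 * Worked example (illustrative): with snd_cwnd = 10 and in_flight = 4,
 * a non-GSO sender has left = 6 free slots and is application limited,
 * so the function returns 0; with in_flight = 8, left = 2 falls within
 * the small allowance tcp_max_tso_deferred_mss() grants for frames that
 * TSO deferral may be holding back, so the sender still counts as
 * cwnd limited.
 */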

/*
 * Slow start is used when congestion window is less than slow start
 * threshold. This version implements the basic RFC2581 version
 * and optionally supports:
 *	RFC3742 Limited Slow Start        - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt; /* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if a delayed ack is detected
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
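
/*
 * Worked example (illustrative): with snd_cwnd = 4 and ABC disabled,
 * each ACK adds cnt = snd_cwnd to snd_cwnd_cnt, so the while loop
 * above fires once per ACK and snd_cwnd grows by one segment per ACK,
 * i.e. the window doubles once per round trip.
 */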

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
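
/*
 * Worked example (illustrative): with w = snd_cwnd = 10, ten ACKs must
 * arrive before snd_cwnd_cnt reaches w and snd_cwnd grows to 11, i.e.
 * roughly one extra segment per round trip: the linear phase of
 * additive-increase congestion avoidance.
 */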

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * Really reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);