/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
        struct tcp_congestion_ops *e;

        list_for_each_entry_rcu(e, &tcp_cong_list, list) {
                if (strcmp(e->name, name) == 0)
                        return e;
        }

        return NULL;
}

/*
 * Attach a new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
        int ret = 0;

        /* all algorithms must implement ssthresh and cong_avoid ops */
        if (!ca->ssthresh || !ca->cong_avoid) {
                printk(KERN_ERR "TCP %s does not implement required ops\n",
                       ca->name);
                return -EINVAL;
        }

        spin_lock(&tcp_cong_list_lock);
        if (tcp_ca_find(ca->name)) {
                printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
                ret = -EEXIST;
        } else {
                list_add_tail_rcu(&ca->list, &tcp_cong_list);
                printk(KERN_INFO "TCP %s registered\n", ca->name);
        }
        spin_unlock(&tcp_cong_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove a congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that algorithm are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
        spin_lock(&tcp_cong_list_lock);
        list_del_rcu(&ca->list);
        spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
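
/*
 * Illustrative sketch (not part of this file): a minimal congestion
 * control module using the two entry points above.  Per the check in
 * tcp_register_congestion_control(), .ssthresh and .cong_avoid are
 * mandatory.  The names tcp_example and tcp_example_* are hypothetical;
 * the Reno helpers reused here are exported further down in this file.
 *
 *      #include <linux/module.h>
 *      #include <net/tcp.h>
 *
 *      static struct tcp_congestion_ops tcp_example = {
 *              .name           = "example",
 *              .owner          = THIS_MODULE,
 *              .ssthresh       = tcp_reno_ssthresh,
 *              .cong_avoid     = tcp_reno_cong_avoid,
 *              .min_cwnd       = tcp_reno_min_cwnd,
 *      };
 *
 *      static int __init tcp_example_init(void)
 *      {
 *              return tcp_register_congestion_control(&tcp_example);
 *      }
 *      module_init(tcp_example_init);
 *
 *      static void __exit tcp_example_exit(void)
 *      {
 *              tcp_unregister_congestion_control(&tcp_example);
 *      }
 *      module_exit(tcp_example_exit);
 *
 *      MODULE_LICENSE("GPL");
 */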

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_congestion_ops *ca;

        /* if no choice was made yet, assign the current default */
        if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
                rcu_read_lock();
                list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
                        if (try_module_get(ca->owner)) {
                                icsk->icsk_ca_ops = ca;
                                break;
                        }

                        /* fall back to the next available */
                }
                rcu_read_unlock();
        }

        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->release)
                icsk->icsk_ca_ops->release(sk);
        module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
        struct tcp_congestion_ops *ca;
        int ret = -ENOENT;

        spin_lock(&tcp_cong_list_lock);
        ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
        if (!ca && capable(CAP_NET_ADMIN)) {
                spin_unlock(&tcp_cong_list_lock);

                request_module("tcp_%s", name);
                spin_lock(&tcp_cong_list_lock);
                ca = tcp_ca_find(name);
        }
#endif

        if (ca) {
                ca->flags |= TCP_CONG_NON_RESTRICTED;   /* default is always allowed */
                list_move(&ca->list, &tcp_cong_list);
                ret = 0;
        }
        spin_unlock(&tcp_cong_list_lock);

        return ret;
}
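
/*
 * This is what the net.ipv4.tcp_congestion_control sysctl ends up
 * calling, e.g. (illustrative):
 *
 *      sysctl -w net.ipv4.tcp_congestion_control=cubic
 *
 * A modular algorithm is request_module()'d on demand, which is why
 * the list lock is dropped and the lookup retried above.
 */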
|  | 138 |  | 
| Stephen Hemminger | b1736a7 | 2006-10-31 17:31:33 -0800 | [diff] [blame] | 139 | /* Set default value from kernel configuration at bootup */ | 
|  | 140 | static int __init tcp_congestion_default(void) | 
|  | 141 | { | 
|  | 142 | return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG); | 
|  | 143 | } | 
|  | 144 | late_initcall(tcp_congestion_default); | 
|  | 145 |  | 
|  | 146 |  | 
| Stephen Hemminger | 3ff825b | 2006-11-09 16:32:06 -0800 | [diff] [blame] | 147 | /* Build string with list of available congestion control values */ | 
|  | 148 | void tcp_get_available_congestion_control(char *buf, size_t maxlen) | 
|  | 149 | { | 
|  | 150 | struct tcp_congestion_ops *ca; | 
|  | 151 | size_t offs = 0; | 
|  | 152 |  | 
|  | 153 | rcu_read_lock(); | 
|  | 154 | list_for_each_entry_rcu(ca, &tcp_cong_list, list) { | 
|  | 155 | offs += snprintf(buf + offs, maxlen - offs, | 
|  | 156 | "%s%s", | 
|  | 157 | offs == 0 ? "" : " ", ca->name); | 
|  | 158 |  | 
|  | 159 | } | 
|  | 160 | rcu_read_unlock(); | 
|  | 161 | } | 

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
        struct tcp_congestion_ops *ca;

        /* We will always have reno... */
        BUG_ON(list_empty(&tcp_cong_list));

        rcu_read_lock();
        ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
        strncpy(name, ca->name, TCP_CA_NAME_MAX);
        rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
        struct tcp_congestion_ops *ca;
        size_t offs = 0;

        *buf = '\0';
        rcu_read_lock();
        list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
                if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
                        continue;
                offs += snprintf(buf + offs, maxlen - offs,
                                 "%s%s",
                                 offs == 0 ? "" : " ", ca->name);
        }
        rcu_read_unlock();
}

/* Change the list of non-restricted congestion control algorithms */
int tcp_set_allowed_congestion_control(char *val)
{
        struct tcp_congestion_ops *ca;
        char *saved_clone, *clone, *name;
        int ret = 0;

        saved_clone = clone = kstrdup(val, GFP_USER);
        if (!clone)
                return -ENOMEM;

        spin_lock(&tcp_cong_list_lock);
        /* pass 1: check for bad entries */
        while ((name = strsep(&clone, " ")) && *name) {
                ca = tcp_ca_find(name);
                if (!ca) {
                        ret = -ENOENT;
                        goto out;
                }
        }

        /* pass 2: clear old values */
        list_for_each_entry_rcu(ca, &tcp_cong_list, list)
                ca->flags &= ~TCP_CONG_NON_RESTRICTED;

        /* pass 3: mark as allowed */
        while ((name = strsep(&val, " ")) && *name) {
                ca = tcp_ca_find(name);
                WARN_ON(!ca);
                if (ca)
                        ca->flags |= TCP_CONG_NON_RESTRICTED;
        }
out:
        spin_unlock(&tcp_cong_list_lock);
        kfree(saved_clone);     /* strsep() advanced clone, free the saved original */

        return ret;
}
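
/*
 * Backs the net.ipv4.tcp_allowed_congestion_control sysctl.  For
 * example (illustrative), to let unprivileged sockets choose only
 * reno and cubic:
 *
 *      sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 */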

/* Change congestion control for a socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_congestion_ops *ca;
        int err = 0;

        rcu_read_lock();
        ca = tcp_ca_find(name);

        /* no change when asking for the current value */
        if (ca == icsk->icsk_ca_ops)
                goto out;

#ifdef CONFIG_MODULES
        /* not found; attempt to autoload the module */
        if (!ca && capable(CAP_NET_ADMIN)) {
                rcu_read_unlock();
                request_module("tcp_%s", name);
                rcu_read_lock();
                ca = tcp_ca_find(name);
        }
#endif
        if (!ca)
                err = -ENOENT;

        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
                err = -EPERM;

        else if (!try_module_get(ca->owner))
                err = -EBUSY;

        else {
                tcp_cleanup_congestion_control(sk);
                icsk->icsk_ca_ops = ca;

                if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
                        icsk->icsk_ca_ops->init(sk);
        }
out:
        rcu_read_unlock();
        return err;
}

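/*
 * Reached via the TCP_CONGESTION socket option.  From userspace
 * (illustrative sketch):
 *
 *      setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic",
 *                 strlen("cubic"));
 *
 * Unprivileged callers may only select algorithms flagged
 * TCP_CONG_NON_RESTRICTED; see the capable(CAP_NET_ADMIN) checks above.
 */
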
/* RFC2861: check whether we are limited by the application or the
 * congestion window.  This is the inverse of the cwnd check in
 * tcp_tso_should_defer().
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        u32 left;

        if (in_flight >= tp->snd_cwnd)
                return 1;

        left = tp->snd_cwnd - in_flight;
        if (sk_can_gso(sk) &&
            left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
            left * tp->mss_cache < sk->sk_gso_max_size)
                return 1;
        return left <= tcp_max_burst(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
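
/*
 * Worked example (illustrative): snd_cwnd == 10 and in_flight == 7
 * leave left == 3 packets of window headroom.  Without GSO the sender
 * is still treated as cwnd-limited when left <= tcp_max_burst(tp), so
 * small amounts of headroom do not stop the window from growing.
 */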

/*
 * Slow start is used when the congestion window is less than the slow
 * start threshold.  This version implements the basic RFC2581 version
 * and optionally supports:
 *      RFC3742 Limited Slow Start        - growth limited to max_ssthresh
 *      RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
        int cnt; /* increase in packets */

        /* RFC3465: ABC Slow start
         * Increase only after a full MSS of bytes is acked
         *
         * TCP sender SHOULD increase cwnd by the number of
         * previously unacknowledged bytes ACKed by each incoming
         * acknowledgment, provided the increase is not more than L
         */
        if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
                return;

        if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                cnt = sysctl_tcp_max_ssthresh >> 1;     /* limited slow start */
        else
                cnt = tp->snd_cwnd;                     /* exponential increase */

        /* RFC3465: ABC
         * We MAY increase by 2 if a delayed ACK is detected
         */
        if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2 * tp->mss_cache)
                cnt <<= 1;
        tp->bytes_acked = 0;

        tp->snd_cwnd_cnt += cnt;
        while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                tp->snd_cwnd_cnt -= tp->snd_cwnd;
                if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                        tp->snd_cwnd++;
        }
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
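
/*
 * Worked example (assuming sysctl_tcp_abc == 0 and
 * sysctl_tcp_max_ssthresh == 0): with snd_cwnd == 4, every ACK adds
 * cnt == snd_cwnd to snd_cwnd_cnt, so the while loop fires each time
 * and the four ACKs of one round trip grow snd_cwnd 4 -> 5 -> 6 ->
 * 7 -> 8, the classic doubling per RTT.  With
 * sysctl_tcp_max_ssthresh == 100, cnt is capped at 50, limiting growth
 * to about 50 packets per RTT however large snd_cwnd gets (RFC3742).
 */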

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
        if (tp->snd_cwnd_cnt >= w) {
                if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                        tp->snd_cwnd++;
                tp->snd_cwnd_cnt = 0;
        } else {
                tp->snd_cwnd_cnt++;
        }
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
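
/*
 * Worked example: Reno below calls this with w == snd_cwnd, so
 * snd_cwnd_cnt must accumulate one full window of ACKs before snd_cwnd
 * is incremented, i.e. cwnd grows by about one packet per RTT.  This
 * is the additive-increase half of AIMD.
 */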

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tcp_is_cwnd_limited(sk, in_flight))
                return;

        /* In "safe" area, increase. */
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp);

        /* In dangerous area, increase slowly. */
        else if (sysctl_tcp_abc) {
                /* RFC3465: Appropriate Byte Counting
                 * increase once for each full cwnd of bytes acked
                 */
                if (tp->bytes_acked >= tp->snd_cwnd * tp->mss_cache) {
                        tp->bytes_acked -= tp->snd_cwnd * tp->mss_cache;
                        if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                                tp->snd_cwnd++;
                }
        } else {
                tcp_cong_avoid_ai(tp, tp->snd_cwnd);
        }
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        return tp->snd_ssthresh / 2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
        .flags          = TCP_CONG_NON_RESTRICTED,
        .name           = "reno",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * Really Reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
        .name           = "",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);