/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach a new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove a congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

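/*
 * Example (a sketch, not part of the original file): a minimal out-of-tree
 * module using the registration API above.  All names here ("example",
 * tcp_example_*) are hypothetical.  Only .ssthresh and .cong_avoid are
 * mandatory, which is exactly what tcp_register_congestion_control()
 * checks; this sketch simply reuses the Reno helpers exported later in
 * this file.
 *
 *	#include <linux/module.h>
 *	#include <net/tcp.h>
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_init);
 *	module_exit(tcp_example_exit);
 *	MODULE_LICENSE("GPL");
 */
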
/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_SYS_MODULE)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

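/*
 * Usage note (a sketch, not part of the original file): the string handed
 * to tcp_set_default_congestion_control() normally comes from the
 * net.ipv4.tcp_congestion_control sysctl, e.g.
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=cubic
 *
 * If "cubic" is not yet registered and the caller has CAP_SYS_MODULE,
 * request_module() tries to load tcp_cubic; on success the entry is moved
 * to the head of tcp_cong_list, where tcp_init_congestion_control() will
 * find it first for new sockets.
 */
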
/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);


/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build string with list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);	/* strsep() advanced clone, free the original */

	return ret;
}


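/*
 * Usage note (a sketch, not part of the original file): the list passed to
 * tcp_set_allowed_congestion_control() normally comes from the
 * net.ipv4.tcp_allowed_congestion_control sysctl as a space-separated
 * string, e.g.
 *
 *	sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 *
 * Algorithms left off this list keep working for sockets already using
 * them, but can only be selected by callers with CAP_NET_ADMIN, as
 * enforced by the TCP_CONG_NON_RESTRICTED check in
 * tcp_set_congestion_control() below.
 */
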
/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change, asking for the existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found, attempt to autoload module */
	if (!ca && capable(CAP_SYS_MODULE)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}

/* RFC2861: check whether we are limited by application or congestion window.
 * This is the inverse of the cwnd check in tcp_tso_should_defer().
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size)
		return 1;
	return left <= tcp_max_burst(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);

/*
 * Slow start is used when the congestion window is less than the slow start
 * threshold.  This version implements the basic RFC2581 behaviour
 * and optionally supports:
 *	RFC3742 Limited Slow Start	  - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt; /* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if a delayed ack is discovered
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

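/*
 * Worked example (illustrative numbers, not from the original code): with
 * ABC off and snd_cwnd = 10 at or below sysctl_tcp_max_ssthresh, each ACK
 * adds cnt = 10 to snd_cwnd_cnt in tcp_slow_start() above, the while loop
 * fires once, and snd_cwnd grows by one packet per ACK, so the window
 * roughly doubles every RTT.  Once snd_cwnd exceeds sysctl_tcp_max_ssthresh
 * (say 100), cnt drops to 50 per ACK, which works out to roughly
 * max_ssthresh/2 extra packets per RTT: the linear growth described by
 * RFC3742.
 */
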
/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Counting
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else
			tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

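/*
 * Worked example (illustrative numbers, not from the original code): in
 * congestion avoidance with ABC off and snd_cwnd = 20, the non-ABC branch
 * of tcp_reno_cong_avoid() above counts ACKs in snd_cwnd_cnt; after 20
 * ACKs (about one RTT) the counter reaches snd_cwnd, the window grows to
 * 21 and the counter resets: the classic additive increase of one segment
 * per RTT.
 */
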
/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN),
 * really Reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control()
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);