/*
 * TCP Veno congestion control
 *
 * This is based on the congestion detection/avoidance scheme described in
 *    C. P. Fu, S. C. Liew.
 *    "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
 *    IEEE Journal on Selected Areas in Communications,
 *    Feb. 2003.
 *    See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
 */
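
/*
 * Veno keeps Reno's AIMD behaviour but, like Vegas, uses rtt samples to
 * estimate how many segments the flow has queued inside the network
 * (veno->diff below).  While that estimate stays below beta the path is
 * treated as "non-congestive": cwnd still grows every rtt in congestion
 * avoidance and a loss only cuts it by 1/5, on the assumption that the
 * loss was random (e.g. wireless corruption) rather than congestion.
 * Otherwise Veno is conservative: cwnd grows every other rtt and a loss
 * halves it, as in Reno.
 */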

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

/* Default values of the Veno variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;
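/* i.e. beta encodes a backlog threshold of 3 segments (3 << 1 == 6). */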

/* Veno variables */
struct veno {
	u8 doing_veno_now;	/* if true, do veno for this rtt */
	u16 cntrtt;		/* # of rtt samples taken within the last rtt */
	u32 minrtt;		/* min of rtts measured within last rtt (in usec) */
	u32 basertt;		/* the min of all Veno rtt measurements seen (in usec) */
	u32 inc;		/* decide whether to increase cwnd */
	u32 diff;		/* backlog estimate, in V_PARAM_SHIFT fixed point */
};

/* There are several situations when we must "re-start" Veno:
 *
 *  o when a connection is established
 *  o after an RTO
 *  o after fast recovery
 *  o when we send a packet and there is no outstanding
 *    unacknowledged data (restarting an idle connection)
 */
static inline void veno_enable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn on Veno */
	veno->doing_veno_now = 1;

	veno->minrtt = 0x7fffffff;
}

static inline void veno_disable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn off Veno */
	veno->doing_veno_now = 0;
}

static void tcp_veno_init(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	veno->basertt = 0x7fffffff;
	veno->inc = 1;
	veno_enable(sk);
}

/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
{
	struct veno *veno = inet_csk_ca(sk);
	u32 vrtt;

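	/*
	 * 'last' is the transmit timestamp of the acked packet, so
	 * net_timedelta(last) gives the elapsed time since it was sent,
	 * i.e. this ack's rtt sample (converted to microseconds below).
	 */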
	/* Never allow zero rtt or baseRTT */
	vrtt = ktime_to_us(net_timedelta(last)) + 1;

	/* Filter to find propagation delay: */
	if (vrtt < veno->basertt)
		veno->basertt = vrtt;

	/* Find the min rtt during the last rtt to find
	 * the current prop. delay + queuing delay:
	 */
	veno->minrtt = min(veno->minrtt, vrtt);
	veno->cntrtt++;
}

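/*
 * Veno's cwnd adjustments are only applied while the connection is in the
 * Open state; in any other state (loss recovery, timeout) cong_avoid falls
 * back to plain Reno behaviour, and veno_enable() restarts the per-rtt
 * minrtt measurement once the connection returns to Open.
 */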
static void tcp_veno_state(struct sock *sk, u8 ca_state)
{
	if (ca_state == TCP_CA_Open)
		veno_enable(sk);
	else
		veno_disable(sk);
}

/*
 * If the connection is idle and we are restarting,
 * then we don't want to do any Veno calculations
 * until we get fresh rtt samples.  So when we
 * restart, we reset our Veno state to a clean
 * state. After we get acks for this flight of
 * packets, _then_ we can make Veno calculations
 * again.
 */
static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
		tcp_veno_init(sk);
}

static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
				u32 seq_rtt, u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (!veno->doing_veno_now)
		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);

	/* limited by applications */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* We do the Veno calculations only if we got enough rtt samples */
	if (veno->cntrtt <= 2) {
		/* We don't have enough rtt samples to do the Veno
		 * calculation, so we'll behave like Reno.
		 */
		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
	} else {
		u32 rtt, target_cwnd;

		/* We have enough rtt samples, so, using the Veno
		 * algorithm, we determine the state of the network.
		 */

		rtt = veno->minrtt;

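		/*
		 * target_cwnd is the number of segments that would be in
		 * flight if the current sending rate saw only the
		 * propagation delay (basertt); diff is therefore an
		 * estimate, in V_PARAM_SHIFT fixed point, of how many
		 * segments this flow has queued in the bottleneck buffer.
		 */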
		target_cwnd = ((tp->snd_cwnd * veno->basertt)
			       << V_PARAM_SHIFT) / rtt;

		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;

		if (tp->snd_cwnd <= tp->snd_ssthresh) {
			/* Slow start. */
			tcp_slow_start(tp);
		} else {
			/* Congestion avoidance. */
			if (veno->diff < beta) {
				/* In the "non-congestive state", increase cwnd
				 * every rtt.
				 */
				if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
					if (tp->snd_cwnd < tp->snd_cwnd_clamp)
						tp->snd_cwnd++;
					tp->snd_cwnd_cnt = 0;
				} else
					tp->snd_cwnd_cnt++;
			} else {
				/* In the "congestive state", increase cwnd
				 * every other rtt.
				 */
				if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
					if (veno->inc &&
					    tp->snd_cwnd < tp->snd_cwnd_clamp) {
						tp->snd_cwnd++;
						veno->inc = 0;
					} else
						veno->inc = 1;
					tp->snd_cwnd_cnt = 0;
				} else
					tp->snd_cwnd_cnt++;
			}
		}
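		/* Keep cwnd within the valid range [2, snd_cwnd_clamp]. */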
		if (tp->snd_cwnd < 2)
			tp->snd_cwnd = 2;
		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
			tp->snd_cwnd = tp->snd_cwnd_clamp;
	}
	/* Wipe the slate clean for the next rtt. */
	/* veno->cntrtt = 0; */
	veno->minrtt = 0x7fffffff;
}

/* Veno MD phase */
static u32 tcp_veno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (veno->diff < beta)
		/* in "non-congestive state", cut cwnd by 1/5 */
		return max(tp->snd_cwnd * 4 / 5, 2U);
	else
		/* in "congestive state", cut cwnd by 1/2 */
		return max(tp->snd_cwnd >> 1U, 2U);
}

static struct tcp_congestion_ops tcp_veno = {
	.flags		= TCP_CONG_RTT_STAMP,
	.init		= tcp_veno_init,
	.ssthresh	= tcp_veno_ssthresh,
	.cong_avoid	= tcp_veno_cong_avoid,
	.pkts_acked	= tcp_veno_pkts_acked,
	.set_state	= tcp_veno_state,
	.cwnd_event	= tcp_veno_cwnd_event,

	.owner		= THIS_MODULE,
	.name		= "veno",
};

static int __init tcp_veno_register(void)
{
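	/* Veno's private state must fit in the per-socket area that
	 * inet_connection_sock reserves for congestion control modules.
	 */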
	BUILD_BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_veno);
	return 0;
}

static void __exit tcp_veno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_veno);
}

module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);

MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");