/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.
*/
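
/*
 * Illustrative usage note (not part of the original sources): these
 * parameters are normally set from user space with the iproute2 "tc"
 * tool.  Assuming a typical iproute2 install, a command such as
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * would map onto the latency, jitter, delay correlation, and loss
 * fields of struct netem_sched_data below.
 */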

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
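
/*
 * Worked example (illustrative, not from the original sources): the
 * blend above computes approximately value*(1 - p) + last*p, where
 * p = rho/2^32.  With rho = 0x80000000 (p ~ 0.5) each output is close
 * to the average of a fresh net_random() sample and the previous
 * output, giving positive correlation between successive values;
 * rho = 0 degenerates to plain net_random().
 */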

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
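
/*
 * Illustrative note: since sigma = (sigma/NETEM_DIST_SCALE)*NETEM_DIST_SCALE
 * + (sigma % NETEM_DIST_SCALE), the two terms of the return statement sum
 * to roughly mu + sigma*t/NETEM_DIST_SCALE (with rounding).  Each table
 * entry t is thus a signed deviation in units of sigma/NETEM_DIST_SCALE,
 * and the split avoids overflowing the intermediate multiply for large
 * sigma.
 */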

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop: 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert the clone at the
	 * top of the qdisc tree, since the parent queuer expects that
	 * only one skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are going to modify the data.
	 * If the packet is going to be hardware checksummed, then
	 * do the checksum now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* is it time to send this packet? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}
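
/*
 * Illustrative note (an assumption about user space, not something this
 * file defines): the distribution tables are normally generated by
 * iproute2, which ships precomputed curves such as "normal" and "pareto"
 * and passes them down via the TCA_NETEM_DELAY_DIST attribute; the
 * kernel only stores and indexes the table.
 */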

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
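
/*
 * Illustrative note: netem's TCA_OPTIONS blob is a fixed
 * struct tc_netem_qopt followed (optionally) by ordinary netlink
 * attributes.  parse_attr() skips the aligned struct and parses whatever
 * bytes remain as attributes, which keeps older user space binaries that
 * send only the bare struct working.
 */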

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the time_to_send timestamp
 * stored in each skb's control block.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
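
/*
 * Illustrative note: the tail-insert fast path above covers the common
 * case of monotonically increasing send times; otherwise the reverse
 * walk finds the last skb whose time_to_send is <= tnext and inserts
 * the new skb after it, keeping the queue sorted by send time despite
 * jitter.
 */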

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");