/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.
*/
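
/*
 * Illustrative userspace usage (assumes the iproute2 "tc" utility is
 * available); this adds 100ms delay with 10ms jitter at 25% correlation
 * and 0.1% random loss on eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 */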

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
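
/*
 * The update above is a fixed-point blend: with r = rho / 2^32, each
 * output is roughly (1 - r) * fresh_random + r * previous_value, so
 * rho == 0 yields independent values and rho close to 2^32 yields
 * strongly correlated ones.
 */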

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
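
/*
 * The return statement above computes mu + t * sigma / NETEM_DIST_SCALE
 * with rounding, split into remainder and quotient parts so the
 * intermediate products stay in range even for large sigma.
 */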

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert the copy at the
	 * top of the qdisc tree, since the parent queuer expects that
	 * only one skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the packet.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

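		/* Flip a single random bit in a random byte of the linear data */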
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* is it time to send this packet yet? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
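
/*
 * A sketch of the throttling mechanism: when the head packet is not yet
 * due, netem_dequeue() arms the watchdog instead of returning it.
 * qdisc_watchdog_schedule() sets TCQ_F_THROTTLED and starts an hrtimer
 * that clears the flag and reschedules the qdisc once time_to_send
 * arrives.
 */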

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}
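
/*
 * Note that the replacement table is fully built before the root lock
 * is taken; the locked pointer swap ensures the packet path never sees
 * a partially initialized distribution table.
 */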

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
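
/*
 * netem's TCA_OPTIONS payload is a struct tc_netem_qopt followed by
 * optional netlink attributes, so parse_attr() skips the aligned
 * struct before handing the remainder to nla_parse().
 */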

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions,
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of their time_to_send timestamps.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
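
/*
 * tfifo_enqueue() is effectively an insertion sort keyed on
 * time_to_send: the common case of monotonically increasing timestamps
 * appends at the tail in O(1), while a reordered packet walks back from
 * the tail to find its slot, O(n) in the worst case.
 */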

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");