/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

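/* Illustrative use from userspace (not part of this file; typical
 * iproute2 invocations, assuming a tc build with netem support):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *
 * The first adds 100ms +/- 10ms of delay with 25% correlation between
 * successive delay samples; the second switches the same qdisc to
 * random loss and duplication.
 */
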
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

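/* Worked example (illustrative, not from the original source): rho is a
 * correlation coefficient scaled so that ~0U means 1.0.  With
 * rho = 0x80000000 (about 0.5), the computation
 *
 *	answer = (value * (2^32 - rho) + last * rho) >> 32
 *
 * reduces to roughly (value + last) / 2, i.e. the new sample is the
 * average of a fresh net_random() value and the previous output.
 * Successive samples therefore drift rather than jump, which is what
 * the delay/loss/duplication correlation knobs rely on.
 */
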
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *	1 => successfully transmitted packets within a gap period
	 *	4 => isolated losses within a gap period
	 *	3 => lost packets within a burst period
	 *	2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

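/* Illustrative parameterization (not from the original source): with
 * p13 = p31 = p32 = p23 = 0 and only p14 nonzero, the chain never
 * enters the burst period; state 1 drops a packet with probability p14
 * and state 4 immediately returns to state 1, so p14 behaves as a plain
 * Bernoulli probability of an isolated loss, matching the legacy
 * random-loss model.
 */
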
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

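/* Worked example (illustrative, not from the original source), assuming
 * NETEM_DIST_SCALE is 8192: with sigma = 10000 and a table entry
 * t = 4096 (i.e. +0.5 standard deviations),
 *
 *	sigma / NETEM_DIST_SCALE = 1, sigma % NETEM_DIST_SCALE = 1808
 *	x = 1808 * 4096 + 8192/2	(rounded remainder part)
 *	result = x / 8192 + 1 * 4096 + mu = mu + 5000
 *
 * i.e. the quotient/remainder split just avoids 32-bit overflow while
 * computing mu + (sigma * t) / NETEM_DIST_SCALE.
 */
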
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* is it time to send this packet? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	dist_free(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}

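/* Note (illustrative, not from the original source): the tables loaded
 * here are normally the normal/pareto/paretonormal distribution files
 * shipped with iproute2 and selected from userspace, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * Each entry is effectively an inverse-CDF sample in units of sigma,
 * scaled by NETEM_DIST_SCALE, which tabledist() above multiplies by the
 * requested jitter.
 */
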
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

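/* Note (illustrative, not from the original source): the TCA_OPTIONS
 * payload for netem is a legacy struct tc_netem_qopt header followed
 * directly by optional netlink attributes, so parse_attr() above skips
 * the first sizeof(*qopt) bytes before handing the remainder to
 * nla_parse().
 */
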
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_info("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_notice("netem: tfifo qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_info("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");