#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc
{
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live on until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put the most frequently
	 * modified fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	unsigned int		(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
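
/*
 * Illustrative sketch (not part of this header): a minimal work-conserving
 * qdisc can be assembled almost entirely from the FIFO helpers defined
 * later in this file, much as sch_fifo does.  The "example_" names are
 * hypothetical; a real module would register the ops with register_qdisc().
 *
 *	static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
 *		.id		= "example_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= qdisc_enqueue_tail,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.drop		= qdisc_queue_drop,
 *		.reset		= qdisc_reset_queue,
 *		.owner		= THIS_MODULE,
 *	};
 */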


struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int	pkt_len;
	char		data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
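
/*
 * Illustrative sketch (assumption): a qdisc needing per-packet private
 * state can overlay its own structure on the data[] area behind
 * qdisc_skb_cb(), e.g. to stamp each skb with a hypothetical deadline:
 *
 *	struct example_skb_cb {
 *		psched_time_t	time_to_send;
 *	};
 *
 *	static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *	{
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */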

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
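
/*
 * Illustrative sketch (assumption): a classful qdisc's ->change() handler
 * typically brackets reconfiguration with sch_tree_lock()/sch_tree_unlock()
 * so the packet processing paths never observe a half-updated tree:
 *
 *	sch_tree_lock(sch);
 *	... swap parameters, drop or shuffle queued skbs as needed ...
 *	sch_tree_unlock(sch);
 *
 * RTNL is already held on this path, which is what makes taking the
 * (sleeping) root lock legal in the first place.
 */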

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common
{
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
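
/*
 * Illustrative sketch (assumption): classful qdiscs embed
 * Qdisc_class_common at the start of their per-class structure, so a
 * classid lookup is a qdisc_class_find() plus a container_of():
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		... per-class scheduling state ...
 *	};
 *
 *	struct Qdisc_class_common *cc = qdisc_class_find(&q->clhash, classid);
 *	struct example_class *cl;
 *
 *	cl = cc ? container_of(cc, struct example_class, common) : NULL;
 *
 * Here "q->clhash" is an assumed struct Qdisc_class_hash in the qdisc's
 * private data.
 */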

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
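
/*
 * Illustrative sketch (assumption): parents that account drops from a
 * child's enqueue return code use net_xmit_drop_count() so that a packet
 * the child *stole* (__NET_XMIT_STOLEN) is not counted as a drop twice:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */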

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
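
/*
 * Illustrative note (assumption): the device TX path feeds the root qdisc
 * through qdisc_enqueue_root(), which seeds pkt_len and masks the
 * qdisc-internal flag bits, roughly:
 *
 *	spin_lock(qdisc_lock(q));
 *	rc = qdisc_enqueue_root(skb, q);
 *	spin_unlock(qdisc_lock(q));
 *
 * Child qdiscs are fed via qdisc_enqueue() instead, so __NET_XMIT_STOLEN
 * and friends remain visible to the parent for accounting.
 */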

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}
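
/*
 * Illustrative sketch (assumption): the helpers above suffice for a
 * byte-limited FIFO enqueue in the style of sch_fifo; "limit" is a
 * hypothetical per-qdisc byte budget:
 *
 *	static int example_bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_reshape_fail(skb, sch);	(defined below)
 *	}
 */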

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdiscs */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
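
/*
 * Illustrative sketch (assumption): a non-work-conserving qdisc (e.g. a
 * shaper) pairs ->peek() with qdisc_dequeue_peeked() so an skb it is not
 * yet allowed to send stays accounted in its child's queue:
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb && example_may_send_now(skb))	(hypothetical admission test)
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 */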

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
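
/*
 * Illustrative sketch (assumption): a token-bucket style qdisc uses
 * qdisc_l2t() to convert a packet's length into transmission time in
 * scheduler ticks and charges that against its saved tokens:
 *
 *	long toks = ...;	(tokens accumulated since the last send)
 *	toks -= (long) qdisc_l2t(q->rate_table, qdisc_pkt_len(skb));
 *	if (toks >= 0)
 *		... transmit and bank the leftover tokens ...
 *
 * "q->rate_table" is an assumed pointer to a qdisc_rate_table filled in
 * from userspace via qdisc_get_rtab().
 */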

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif