/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device,
 *	      when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	HW: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use HW,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	HW: device is required to csum packet as seen by hard_start_xmit
 *	    from skb->h.raw to the end and to record the checksum
 *	    at skb->h.raw+skb->csum.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */

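/*
 * Illustrative sketch (editor's example, not kernel code): how a driver
 * might report receive checksum status under the rules above. The
 * rx_desc fields are hypothetical hardware descriptor fields.
 *
 *	if (rx_desc->csum_computed) {
 *		skb->ip_summed = CHECKSUM_HW;
 *		skb->csum = rx_desc->csum;	(raw csum of the whole packet)
 *	} else {
 *		skb->ip_summed = CHECKSUM_NONE;	(stack verifies in software)
 *	}
 *	netif_rx(skb);
 */
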
struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	unsigned int	ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

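/*
 * Editor's sketch of how the two halves of dataref decode; this mirrors
 * the arithmetic used by skb_header_cloned() below:
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *	int header_refs  = total_refs - payload_refs;
 */
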
struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDPV4 = 1 << 1,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@nfmark: Can be used for communication between hooks
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len,
				csum;
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
	__u32			nfmark;
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif


	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};

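/*
 * Editor's note: the usual way to use cb[] is a per-protocol overlay
 * struct plus an accessor macro, in the style of TCP's tcp_skb_cb.
 * A hypothetical sketch (names are illustrative):
 *
 *	struct my_skb_cb {
 *		__u32 seq;
 *		__u8  flags;
 *	};
 *	#define MY_SKB_CB(skb) ((struct my_skb_cb *)&((skb)->cb[0]))
 *
 * The overlay must fit in the 48 bytes of cb[] and is only valid while
 * your layer owns the queued skb.
 */
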
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void	       kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1);
}

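/*
 * Editor's sketch of typical allocation: alloc_skb() for a fresh buffer,
 * alloc_skb_fclone() when the caller expects the skb to be cloned soon
 * (e.g. TCP transmit), so the clone can come from the same slab object.
 * size/headroom are illustrative:
 *
 *	struct sk_buff *skb = alloc_skb(size + headroom, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, headroom);	(see skb_reserve() below)
 */
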
extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

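/*
 * Editor's sketch of the sequential-read pattern these helpers support:
 * walking an skb (including frags and frag_list) in mappable chunks.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, len);	(hypothetical consumer)
 *		consumed += len;
 *	}
 *	(call skb_abort_seq_read(&st) if you stop before the end)
 */
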
extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

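/*
 * Editor's sketch of pattern matching over an skb with the textsearch
 * infrastructure from linux/textsearch.h (algorithm name and flags are
 * illustrative):
 *
 *	struct ts_state state;
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", pattern, pat_len,
 *				  GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	(UINT_MAX means no match)
 *	textsearch_destroy(conf);
 */
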
/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

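/*
 * Editor's sketch: receive handlers that may modify skb metadata (e.g.
 * protocol taps, bridging) typically start with:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;	(clone failed; original was freed)
 */
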
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader as well as a
 *	forwarding path, and a couple of other messy ones. The normal one
 *	is tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

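/*
 * Editor's sketch of typical queue usage. The locked variants
 * (skb_queue_tail(), skb_dequeue()) take list->lock themselves; the
 * __skb_*() variants below require the caller to hold it.
 *
 *	struct sk_buff_head q;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);		(producer side)
 *	while ((skb = skb_dequeue(&q)) != NULL)	(consumer side)
 *		process(skb);			(hypothetical)
 */
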
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next	    = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next	    = (struct sk_buff *)list;
	prev	    = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * Remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

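/*
 * Editor's sketch of attaching page data as a fragment. The caller hands
 * a page reference to the skb and must update the length fields itself,
 * since skb_fill_page_desc() only fills in the descriptor:
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, off, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += size;
 */
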
#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

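/*
 * Editor's sketch of the classic data-area dance when building a packet:
 * reserve headroom, put the payload, then push each header in front.
 * The sizes and payload variables are illustrative.
 *
 *	skb = alloc_skb(ETH_HLEN + sizeof(struct iphdr) + payload_len,
 *			GFP_ATOMIC);
 *	skb_reserve(skb, ETH_HLEN + sizeof(struct iphdr));
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	skb->nh.iph  = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));
 *	skb->mac.raw = skb_push(skb, ETH_HLEN);
 *
 * On receive, skb_pull() strips headers as each layer consumes them.
 */
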
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

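/*
 * Editor's sketch of the standard header-parsing guard: make sure the
 * bytes you are about to read are in the linear area before touching
 * them (pskb_expand_head() fixes up the h/nh/mac pointers if the head
 * gets reallocated):
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = skb->nh.iph;	(now safe to dereference)
 */
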
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -0700984
985static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
986{
Herbert Xu3cc0e872006-06-09 16:13:38 -0700987 if (unlikely(skb->data_len)) {
988 WARN_ON(1);
989 return;
990 }
991 skb->len = len;
992 skb->tail = skb->data + len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993}
994
995/**
996 * skb_trim - remove end from a buffer
997 * @skb: buffer to alter
998 * @len: new length
999 *
1000 * Cut the length of a buffer down by removing data from the tail. If
1001 * the buffer is already under the length specified it is not modified.
Herbert Xu3cc0e872006-06-09 16:13:38 -07001002 * The skb must be linear.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003 */
1004static inline void skb_trim(struct sk_buff *skb, unsigned int len)
1005{
1006 if (skb->len > len)
1007 __skb_trim(skb, len);
1008}
1009
1010
1011static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1012{
Herbert Xu3cc0e872006-06-09 16:13:38 -07001013 if (skb->data_len)
1014 return ___pskb_trim(skb, len);
1015 __skb_trim(skb, len);
1016 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017}
1018
1019static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1020{
1021 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1022}
1023
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
/**
 *	__dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}
#else
extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
#endif

/**
 *	dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

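/*
 * Editor's sketch of the usual receive-path allocation in a driver,
 * combining dev_alloc_skb() with the NET_IP_ALIGN shift described above
 * (pkt_len and the copy from rx_buf are illustrative):
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;				(drop, out of memory)
 *	skb_reserve(skb, NET_IP_ALIGN);		(align the IP header)
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */
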
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
	return 0;
}

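/*
 * Editor's sketch: call skb_cow() before modifying headers on a packet
 * that may be cloned or short on headroom, e.g. when rewriting headers
 * in a forwarding path:
 *
 *	if (skb_cow(skb, ETH_HLEN))
 *		goto drop;		(reallocation failed)
 *	(skb->head ... skb->tail is now private and writable)
 */
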
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

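/*
 * Editor's sketch of the usual transmit-path use: ethernet frames must
 * be at least ETH_ZLEN bytes on the wire, so runts are padded before
 * being handed to the hardware. Note the skb has already been freed
 * when skb_padto() fails:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	(skb was consumed)
 */
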
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
							    skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE
 *	so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_HW)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	BUG_ON(in_irq());

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)


extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
				    int len, unsigned int csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      unsigned int csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern void	       skb_release_data(struct sk_buff *skb);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

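/*
 * Editor's sketch: skb_header_pointer() hides whether the requested
 * bytes are linear or fragmented; the caller supplies a bounce buffer
 * for the fragmented case.
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (uh == NULL)
 *		goto drop;	(packet too short)
 *	(uh points either into skb->data or at the copy in _uh)
 */
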
extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 *	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern unsigned int __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}

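/*
 * Editor's sketch of receive-side verification for a protocol with a
 * complete checksum; the pseudo-header fold shown is illustrative:
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	(bad checksum, drop and count)
 */
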
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */