/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov	:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski	:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnels); others are silly (PPP).
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
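
/*
 * Illustrative user-space sketch (not part of this file; includes, error
 * handling and the "eth0" interface name are assumptions made for the
 * example). With SOCK_RAW the link-layer header is part of the buffer seen
 * by user space; with SOCK_DGRAM it is not:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *	n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
 *
 * Here buf[0..13] would be the Ethernet header; a SOCK_DGRAM socket would
 * receive the same frame starting at the network header instead.
 */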

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		switch (ecmd.speed) {
		case SPEED_10000:
			msec = 1;
			div = 10000/1000;
			break;
		case SPEED_1000:
			msec = 1;
			div = 1000/1000;
			break;
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyway
		 */
		case SPEED_100:
		case SPEED_10:
		default:
			return DEFAULT_PRB_RETIRE_TOV;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= (char *)pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
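 * Illustratively (a worked restatement of the figure above): a 1 MB block is
 * 8 Mbit of data, so at 1 Gbit/s it takes roughly 8 Mbit / 1000 Mbit/s ~= 8 ms
 * to fill, which is consistent with the 8 ms DEFAULT_PRB_RETIRE_TOV default.
 *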
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught up,
				 * now the link went idle && the timer fired.
				 * We don't have a block to close. So we open this
				 * block and restart the timer.
				 * Opening a block thaws the queue, restarts timer.
				 * Thawing/timer-refresh is a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {

		/* We could have just memset this but we will lose the
		 * flexibility of making the priv area sticky
		 */
		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
		BLOCK_NUM_PKTS(pbd1) = 0;
		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		getnstimeofday(&ts);
		h1->ts_first_pkt.ts_sec = ts.tv_sec;
		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
		pkc1->pkblk_start = (char *)pbd1;
		pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
		BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
		pbd1->version = pkc1->version;
		pkc1->prev = pkc1->nxt_offset;
		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
		prb_thaw_queue(pkc1);
		_prb_refresh_rx_retire_blk_timer(pkc1);

		smp_wmb();

		return;
	}

	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
	dump_stack();
	BUG();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}

	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
	dump_stack();
	BUG();
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *) ((char *)pbd + pkc->kblk_size);

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int previous,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001377static const struct proto_ops packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001379static const struct proto_ops packet_ops_spkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001381static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1382 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383{
1384 struct sock *sk;
1385 struct sockaddr_pkt *spkt;
1386
1387 /*
1388 * When we registered the protocol we saved the socket in the data
1389 * field for just this event.
1390 */
1391
1392 sk = pt->af_packet_priv;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 /*
1395 * Yank back the headers [hope the device set this
1396 * right or kerboom...]
1397 *
1398 * Incoming packets have ll header pulled,
1399 * push it back.
1400 *
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001401 * For outgoing ones skb->data == skb_mac_header(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 * so that this procedure is noop.
1403 */
1404
1405 if (skb->pkt_type == PACKET_LOOPBACK)
1406 goto out;
1407
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001408 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001409 goto out;
1410
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001411 skb = skb_share_check(skb, GFP_ATOMIC);
1412 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 goto oom;
1414
1415 /* drop any routing info */
Eric Dumazetadf30902009-06-02 05:19:30 +00001416 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
Phil Oester84531c22005-07-12 11:57:52 -07001418 /* drop conntrack reference */
1419 nf_reset(skb);
1420
Herbert Xuffbc6112007-02-04 23:33:10 -08001421 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001423 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
1425 /*
1426 * The SOCK_PACKET socket receives _all_ frames.
1427 */
1428
1429 spkt->spkt_family = dev->type;
1430 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1431 spkt->spkt_protocol = skb->protocol;
1432
1433 /*
1434 * Charge the memory to the socket. This is done specifically
1435 * to prevent sockets using all the memory up.
1436 */
1437
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001438 if (sock_queue_rcv_skb(sk, skb) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 return 0;
1440
1441out:
1442 kfree_skb(skb);
1443oom:
1444 return 0;
1445}
1446
1447
1448/*
1449 * Output a raw packet to a device layer. This bypasses all the other
1450 * protocol layers and you must therefore supply it with a complete frame
1451 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1454 struct msghdr *msg, size_t len)
1455{
1456 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001457 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001458 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001460 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 int err;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001462
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001464 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 */
1466
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001467 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001469 return -EINVAL;
1470 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1471 proto = saddr->spkt_protocol;
1472 } else
1473 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
1475 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001476 * Find the device first to size check it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 */
1478
1479 saddr->spkt_device[13] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001480retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001481 rcu_read_lock();
1482 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 err = -ENODEV;
1484 if (dev == NULL)
1485 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001486
David S. Millerd5e76b02007-01-25 19:30:36 -08001487 err = -ENETDOWN;
1488 if (!(dev->flags & IFF_UP))
1489 goto out_unlock;
1490
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001492 * You may not queue a frame bigger than the mtu. This is the lowest level
1493 * raw protocol and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 err = -EMSGSIZE;
Ben Greear57f89bf2011-02-11 09:35:18 +00001497 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 goto out_unlock;
1499
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001500 if (!skb) {
1501 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001502 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001503 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001505 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001506 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001507 if (skb == NULL)
1508 return -ENOBUFS;
1509 /* FIXME: Save some space for broken drivers that write a hard
1510 * header at transmission time by themselves. PPP is the notable
1511 * one here. This should really be fixed at the driver level.
1512 */
1513 skb_reserve(skb, reserved);
1514 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001515
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001516 /* Try to align data part correctly */
1517 if (hhlen) {
1518 skb->data -= hhlen;
1519 skb->tail -= hhlen;
1520 if (len < hhlen)
1521 skb_reset_network_header(skb);
1522 }
1523 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1524 if (err)
1525 goto out_free;
1526 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 }
1528
Ben Greear57f89bf2011-02-11 09:35:18 +00001529 if (len > (dev->mtu + dev->hard_header_len)) {
1530 /* Earlier code assumed this would be a VLAN pkt,
1531 * double-check this now that we have the actual
1532 * packet in hand.
1533 */
1534 struct ethhdr *ehdr;
1535 skb_reset_mac_header(skb);
1536 ehdr = eth_hdr(skb);
1537 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1538 err = -EMSGSIZE;
1539 goto out_unlock;
1540 }
1541 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001542
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 skb->protocol = proto;
1544 skb->dev = dev;
1545 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001546 skb->mark = sk->sk_mark;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001547 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00001548 if (err < 0)
1549 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
1551 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001552 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001553 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001556 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001557out_free:
1558 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 return err;
1560}
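/*
 * Illustrative userspace sketch (not part of this file): transmitting a
 * complete frame through the SOCK_PACKET path above.  packet_sendmsg_spkt()
 * requires an address, and takes the protocol from spkt_protocol when a
 * full sockaddr_pkt is passed.  "eth0", ETH_P_IP and the frame contents are
 * placeholders; error handling is omitted.  Assumes <string.h>,
 * <sys/socket.h>, <arpa/inet.h>, <linux/if_ether.h> and <linux/if_packet.h>.
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt;
 *	unsigned char frame[ETH_ZLEN];
 *
 *	(... build a complete frame, MAC header included, in frame[] ...)
 *	memset(&spkt, 0, sizeof(spkt));
 *	spkt.spkt_family = AF_PACKET;
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, sizeof(frame), 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */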
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001562static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001563 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001564 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
1566 struct sk_filter *filter;
1567
Eric Dumazet80f8f102011-01-18 07:46:52 +00001568 rcu_read_lock();
1569 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001570 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001571 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001572 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
David S. Millerdbcb5852007-01-24 15:21:02 -08001574 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
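/*
 * Illustrative userspace sketch (not part of this file): the filter run by
 * run_filter() above is a classic BPF program attached with
 * SO_ATTACH_FILTER; its return value becomes the snap length (0 drops the
 * packet).  The single-instruction program below simply returns 96, i.e.
 * "accept and truncate to 96 bytes".  fd is assumed to be an AF_PACKET
 * socket created elsewhere and error handling is omitted.
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },
 *	};
 *	struct sock_fprog prog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */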
1576
1577/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001578 * This function makes lazy skb cloning in hope that most of packets
1579 * are discarded by BPF.
1580 *
1581 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1582 * and skb->cb are mangled. It works because (and until) packets
1583 * falling here are owned by current CPU. Output packets are cloned
1584 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1585 * sequencially, so that if we return skb to original state on exit,
1586 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 */
1588
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001589static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1590 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
1592 struct sock *sk;
1593 struct sockaddr_ll *sll;
1594 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001595 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001597 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599 if (skb->pkt_type == PACKET_LOOPBACK)
1600 goto drop;
1601
1602 sk = pt->af_packet_priv;
1603 po = pkt_sk(sk);
1604
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001605 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001606 goto drop;
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 skb->dev = dev;
1609
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001610 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001612 * exported to higher levels.
1613 *
1614 * Otherwise, the device hides details of its frame
1615		 * structure, so that the corresponding packet header is
1616		 * never delivered to the user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 */
1618 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001619 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 else if (skb->pkt_type == PACKET_OUTGOING) {
1621 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001622 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 }
1624 }
1625
1626 snaplen = skb->len;
1627
David S. Millerdbcb5852007-01-24 15:21:02 -08001628 res = run_filter(skb, sk, snaplen);
1629 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001630 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001631 if (snaplen > res)
1632 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
1634 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
1635 (unsigned)sk->sk_rcvbuf)
1636 goto drop_n_acct;
1637
1638 if (skb_shared(skb)) {
1639 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1640 if (nskb == NULL)
1641 goto drop_n_acct;
1642
1643 if (skb_head != skb->data) {
1644 skb->data = skb_head;
1645 skb->len = skb_len;
1646 }
1647 kfree_skb(skb);
1648 skb = nskb;
1649 }
1650
Herbert Xuffbc6112007-02-04 23:33:10 -08001651 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1652 sizeof(skb->cb));
1653
1654 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 sll->sll_family = AF_PACKET;
1656 sll->sll_hatype = dev->type;
1657 sll->sll_protocol = skb->protocol;
1658 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001659 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001660 sll->sll_ifindex = orig_dev->ifindex;
1661 else
1662 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001664 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665
Herbert Xuffbc6112007-02-04 23:33:10 -08001666 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001667
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 if (pskb_trim(skb, snaplen))
1669 goto drop_n_acct;
1670
1671 skb_set_owner_r(skb, sk);
1672 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001673 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Phil Oester84531c22005-07-12 11:57:52 -07001675 /* drop conntrack reference */
1676 nf_reset(skb);
1677
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 spin_lock(&sk->sk_receive_queue.lock);
1679 po->stats.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001680 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 __skb_queue_tail(&sk->sk_receive_queue, skb);
1682 spin_unlock(&sk->sk_receive_queue.lock);
1683 sk->sk_data_ready(sk, skb->len);
1684 return 0;
1685
1686drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001687 spin_lock(&sk->sk_receive_queue.lock);
1688 po->stats.tp_drops++;
1689 atomic_inc(&sk->sk_drops);
1690 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
1692drop_n_restore:
1693 if (skb_head != skb->data && skb_shared(skb)) {
1694 skb->data = skb_head;
1695 skb->len = skb_len;
1696 }
1697drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001698 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 return 0;
1700}
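/*
 * Illustrative userspace sketch (not part of this file): the sockaddr_ll
 * that packet_rcv() stores in the skb control block is what recvfrom()
 * hands back for a non-mmap()ed AF_PACKET socket.  Error handling is
 * omitted; assumes the usual socket and packet headers.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[65536];
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &alen);
 *
 * On return, sll.sll_ifindex identifies the receiving device (or the
 * originating one if PACKET_ORIGDEV is set), sll.sll_pkttype is the
 * PACKET_* type, and sll.sll_addr/sll_halen hold the link-layer source
 * address parsed by dev_parse_header().
 */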
1701
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001702static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1703 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704{
1705 struct sock *sk;
1706 struct packet_sock *po;
1707 struct sockaddr_ll *sll;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001708 union {
1709 struct tpacket_hdr *h1;
1710 struct tpacket2_hdr *h2;
chetan lokef6fb8f12011-08-19 10:18:16 +00001711 struct tpacket3_hdr *h3;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001712 void *raw;
1713 } h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001714 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001716 unsigned int snaplen, res;
chetan lokef6fb8f12011-08-19 10:18:16 +00001717 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001718 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 struct sk_buff *copy_skb = NULL;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001720 struct timeval tv;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001721 struct timespec ts;
Scott McMillan614f60f2010-06-02 05:53:56 -07001722 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724 if (skb->pkt_type == PACKET_LOOPBACK)
1725 goto drop;
1726
1727 sk = pt->af_packet_priv;
1728 po = pkt_sk(sk);
1729
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001730 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001731 goto drop;
1732
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001733 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001735 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 else if (skb->pkt_type == PACKET_OUTGOING) {
1737 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001738 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 }
1740 }
1741
Herbert Xu8dc41942007-02-04 23:31:32 -08001742 if (skb->ip_summed == CHECKSUM_PARTIAL)
1743 status |= TP_STATUS_CSUMNOTREADY;
1744
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 snaplen = skb->len;
1746
David S. Millerdbcb5852007-01-24 15:21:02 -08001747 res = run_filter(skb, sk, snaplen);
1748 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001749 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001750 if (snaplen > res)
1751 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
1753 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy89133362008-07-18 18:05:19 -07001754 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1755 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 } else {
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001757 unsigned maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001758 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy89133362008-07-18 18:05:19 -07001759 (maclen < 16 ? 16 : maclen)) +
1760 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 macoff = netoff - maclen;
1762 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001763 if (po->tp_version <= TPACKET_V2) {
1764 if (macoff + snaplen > po->rx_ring.frame_size) {
1765 if (po->copy_thresh &&
1766 atomic_read(&sk->sk_rmem_alloc) + skb->truesize
1767 < (unsigned)sk->sk_rcvbuf) {
1768 if (skb_shared(skb)) {
1769 copy_skb = skb_clone(skb, GFP_ATOMIC);
1770 } else {
1771 copy_skb = skb_get(skb);
1772 skb_head = skb->data;
1773 }
1774 if (copy_skb)
1775 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001777 snaplen = po->rx_ring.frame_size - macoff;
1778 if ((int)snaplen < 0)
1779 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00001783 h.raw = packet_current_rx_frame(po, skb,
1784 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001785 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 goto ring_is_full;
chetan lokef6fb8f12011-08-19 10:18:16 +00001787 if (po->tp_version <= TPACKET_V2) {
1788 packet_increment_rx_head(po, &po->rx_ring);
1789 /*
1790		 * LOSING will be reported until you read the stats,
1791		 * because it's COR - Clear On Read.
1792		 * Anyway, this is done for V1/V2 only, as V3 doesn't need this
1793 * at packet level.
1794 */
1795 if (po->stats.tp_drops)
1796 status |= TP_STATUS_LOSING;
1797 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 po->stats.tp_packets++;
1799 if (copy_skb) {
1800 status |= TP_STATUS_COPY;
1801 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1802 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 spin_unlock(&sk->sk_receive_queue.lock);
1804
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001805 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001807 switch (po->tp_version) {
1808 case TPACKET_V1:
1809 h.h1->tp_len = skb->len;
1810 h.h1->tp_snaplen = snaplen;
1811 h.h1->tp_mac = macoff;
1812 h.h1->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001813 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1814 && shhwtstamps->syststamp.tv64)
1815 tv = ktime_to_timeval(shhwtstamps->syststamp);
1816 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1817 && shhwtstamps->hwtstamp.tv64)
1818 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1819 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001820 tv = ktime_to_timeval(skb->tstamp);
1821 else
1822 do_gettimeofday(&tv);
1823 h.h1->tp_sec = tv.tv_sec;
1824 h.h1->tp_usec = tv.tv_usec;
1825 hdrlen = sizeof(*h.h1);
1826 break;
1827 case TPACKET_V2:
1828 h.h2->tp_len = skb->len;
1829 h.h2->tp_snaplen = snaplen;
1830 h.h2->tp_mac = macoff;
1831 h.h2->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001832 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1833 && shhwtstamps->syststamp.tv64)
1834 ts = ktime_to_timespec(shhwtstamps->syststamp);
1835 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1836 && shhwtstamps->hwtstamp.tv64)
1837 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1838 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001839 ts = ktime_to_timespec(skb->tstamp);
1840 else
1841 getnstimeofday(&ts);
1842 h.h2->tp_sec = ts.tv_sec;
1843 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001844 if (vlan_tx_tag_present(skb)) {
1845 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1846 status |= TP_STATUS_VLAN_VALID;
1847 } else {
1848 h.h2->tp_vlan_tci = 0;
1849 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001850 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001851 hdrlen = sizeof(*h.h2);
1852 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00001853 case TPACKET_V3:
1854 /* tp_nxt_offset,vlan are already populated above.
1855 * So DONT clear those fields here
1856 */
1857 h.h3->tp_status |= status;
1858 h.h3->tp_len = skb->len;
1859 h.h3->tp_snaplen = snaplen;
1860 h.h3->tp_mac = macoff;
1861 h.h3->tp_net = netoff;
1862 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1863 && shhwtstamps->syststamp.tv64)
1864 ts = ktime_to_timespec(shhwtstamps->syststamp);
1865 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1866 && shhwtstamps->hwtstamp.tv64)
1867 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1868 else if (skb->tstamp.tv64)
1869 ts = ktime_to_timespec(skb->tstamp);
1870 else
1871 getnstimeofday(&ts);
1872 h.h3->tp_sec = ts.tv_sec;
1873 h.h3->tp_nsec = ts.tv_nsec;
1874 hdrlen = sizeof(*h.h3);
1875 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001876 default:
1877 BUG();
1878 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001880 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001881 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 sll->sll_family = AF_PACKET;
1883 sll->sll_hatype = dev->type;
1884 sll->sll_protocol = skb->protocol;
1885 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001886 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001887 sll->sll_ifindex = orig_dev->ifindex;
1888 else
1889 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890
Ralf Baechlee16aa202006-12-07 00:11:33 -08001891 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001892#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001894 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
chetan lokef6fb8f12011-08-19 10:18:16 +00001896 if (po->tp_version <= TPACKET_V2) {
1897 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1898 + macoff + snaplen);
1899 for (start = h.raw; start < end; start += PAGE_SIZE)
1900 flush_dcache_page(pgv_to_page(start));
1901 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001902 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001904#endif
chetan lokef6fb8f12011-08-19 10:18:16 +00001905 if (po->tp_version <= TPACKET_V2)
1906 __packet_set_status(po, h.raw, status);
1907 else
1908 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 sk->sk_data_ready(sk, 0);
1911
1912drop_n_restore:
1913 if (skb_head != skb->data && skb_shared(skb)) {
1914 skb->data = skb_head;
1915 skb->len = skb_len;
1916 }
1917drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001918 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 return 0;
1920
1921ring_is_full:
1922 po->stats.tp_drops++;
1923 spin_unlock(&sk->sk_receive_queue.lock);
1924
1925 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001926 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 goto drop_n_restore;
1928}
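/*
 * Illustrative userspace sketch (not part of this file): the mmap()ed RX
 * ring that tpacket_rcv() fills.  A TPACKET_V2 ring is shown; the sizes are
 * arbitrary examples (tp_frame_nr must equal
 * tp_block_nr * tp_block_size / tp_frame_size), and error handling, the
 * poll() loop and walking beyond the first frame are omitted.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size	= 4096,
 *		.tp_block_nr	= 64,
 *		.tp_frame_size	= 2048,
 *		.tp_frame_nr	= 128,
 *	};
 *	int version = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket2_hdr *hdr = ring;	(first frame in the ring)
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		unsigned char *data = (unsigned char *)hdr + hdr->tp_mac;
 *		(... consume tp_snaplen bytes at data ...)
 *		hdr->tp_status = TP_STATUS_KERNEL;	(hand the frame back)
 *	}
 */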
1929
Johann Baudy69e3c752009-05-18 22:11:22 -07001930static void tpacket_destruct_skb(struct sk_buff *skb)
1931{
1932 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001933 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001934
Johann Baudy69e3c752009-05-18 22:11:22 -07001935 if (likely(po->tx_ring.pg_vec)) {
1936 ph = skb_shinfo(skb)->destructor_arg;
1937 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
1938 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1939 atomic_dec(&po->tx_ring.pending);
1940 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1941 }
1942
1943 sock_wfree(skb);
1944}
1945
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001946static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1947 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001948 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001949{
1950 union {
1951 struct tpacket_hdr *h1;
1952 struct tpacket2_hdr *h2;
1953 void *raw;
1954 } ph;
1955 int to_write, offset, len, tp_len, nr_frags, len_max;
1956 struct socket *sock = po->sk.sk_socket;
1957 struct page *page;
1958 void *data;
1959 int err;
1960
1961 ph.raw = frame;
1962
1963 skb->protocol = proto;
1964 skb->dev = dev;
1965 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001966 skb->mark = po->sk.sk_mark;
Johann Baudy69e3c752009-05-18 22:11:22 -07001967 skb_shinfo(skb)->destructor_arg = ph.raw;
1968
1969 switch (po->tp_version) {
1970 case TPACKET_V2:
1971 tp_len = ph.h2->tp_len;
1972 break;
1973 default:
1974 tp_len = ph.h1->tp_len;
1975 break;
1976 }
1977 if (unlikely(tp_len > size_max)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001978 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
Johann Baudy69e3c752009-05-18 22:11:22 -07001979 return -EMSGSIZE;
1980 }
1981
Herbert Xuae641942011-11-18 02:20:04 +00001982 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07001983 skb_reset_network_header(skb);
1984
1985 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1986 to_write = tp_len;
1987
1988 if (sock->type == SOCK_DGRAM) {
1989 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1990 NULL, tp_len);
1991 if (unlikely(err < 0))
1992 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001993 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07001994 /* net device doesn't like empty head */
1995 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001996 pr_err("packet size is too short (%d < %d)\n",
1997 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07001998 return -EINVAL;
1999 }
2000
2001 skb_push(skb, dev->hard_header_len);
2002 err = skb_store_bits(skb, 0, data,
2003 dev->hard_header_len);
2004 if (unlikely(err))
2005 return err;
2006
2007 data += dev->hard_header_len;
2008 to_write -= dev->hard_header_len;
2009 }
2010
2011 err = -EFAULT;
Johann Baudy69e3c752009-05-18 22:11:22 -07002012 offset = offset_in_page(data);
2013 len_max = PAGE_SIZE - offset;
2014 len = ((to_write > len_max) ? len_max : to_write);
2015
2016 skb->data_len = to_write;
2017 skb->len += to_write;
2018 skb->truesize += to_write;
2019 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2020
2021 while (likely(to_write)) {
2022 nr_frags = skb_shinfo(skb)->nr_frags;
2023
2024 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002025			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2026 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07002027 return -EFAULT;
2028 }
2029
Changli Gao0af55bb2010-12-01 02:52:20 +00002030 page = pgv_to_page(data);
2031 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07002032 flush_dcache_page(page);
2033 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00002034 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002035 to_write -= len;
2036 offset = 0;
2037 len_max = PAGE_SIZE;
2038 len = ((to_write > len_max) ? len_max : to_write);
2039 }
2040
2041 return tp_len;
2042}
2043
2044static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2045{
Johann Baudy69e3c752009-05-18 22:11:22 -07002046 struct sk_buff *skb;
2047 struct net_device *dev;
2048 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002049 bool need_rls_dev = false;
2050 int err, reserve = 0;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002051 void *ph;
2052 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07002053 int tp_len, size_max;
2054 unsigned char *addr;
2055 int len_sum = 0;
2056 int status = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002057 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07002058
Johann Baudy69e3c752009-05-18 22:11:22 -07002059 mutex_lock(&po->pg_vec_lock);
2060
2061 err = -EBUSY;
2062 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002063 dev = po->prot_hook.dev;
Johann Baudy69e3c752009-05-18 22:11:22 -07002064 proto = po->num;
2065 addr = NULL;
2066 } else {
2067 err = -EINVAL;
2068 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2069 goto out;
2070 if (msg->msg_namelen < (saddr->sll_halen
2071 + offsetof(struct sockaddr_ll,
2072 sll_addr)))
2073 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002074 proto = saddr->sll_protocol;
2075 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002076 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2077 need_rls_dev = true;
Johann Baudy69e3c752009-05-18 22:11:22 -07002078 }
2079
Johann Baudy69e3c752009-05-18 22:11:22 -07002080 err = -ENXIO;
2081 if (unlikely(dev == NULL))
2082 goto out;
2083
2084 reserve = dev->hard_header_len;
2085
2086 err = -ENETDOWN;
2087 if (unlikely(!(dev->flags & IFF_UP)))
2088 goto out_put;
2089
2090 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002091 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002092
2093 if (size_max > dev->mtu + reserve)
2094 size_max = dev->mtu + reserve;
2095
2096 do {
2097 ph = packet_current_frame(po, &po->tx_ring,
2098 TP_STATUS_SEND_REQUEST);
2099
2100 if (unlikely(ph == NULL)) {
2101 schedule();
2102 continue;
2103 }
2104
2105 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002106 hlen = LL_RESERVED_SPACE(dev);
2107 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002108 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002109 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002110 0, &err);
2111
2112 if (unlikely(skb == NULL))
2113 goto out_status;
2114
2115 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002116 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002117
2118 if (unlikely(tp_len < 0)) {
2119 if (po->tp_loss) {
2120 __packet_set_status(po, ph,
2121 TP_STATUS_AVAILABLE);
2122 packet_increment_head(&po->tx_ring);
2123 kfree_skb(skb);
2124 continue;
2125 } else {
2126 status = TP_STATUS_WRONG_FORMAT;
2127 err = tp_len;
2128 goto out_status;
2129 }
2130 }
2131
2132 skb->destructor = tpacket_destruct_skb;
2133 __packet_set_status(po, ph, TP_STATUS_SENDING);
2134 atomic_inc(&po->tx_ring.pending);
2135
2136 status = TP_STATUS_SEND_REQUEST;
2137 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002138 if (unlikely(err > 0)) {
2139 err = net_xmit_errno(err);
2140 if (err && __packet_get_status(po, ph) ==
2141 TP_STATUS_AVAILABLE) {
2142 /* skb was destructed already */
2143 skb = NULL;
2144 goto out_status;
2145 }
2146 /*
2147 * skb was dropped but not destructed yet;
2148 * let's treat it like congestion or err < 0
2149 */
2150 err = 0;
2151 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002152 packet_increment_head(&po->tx_ring);
2153 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002154 } while (likely((ph != NULL) ||
2155 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2156 (atomic_read(&po->tx_ring.pending))))
2157 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002158
2159 err = len_sum;
2160 goto out_put;
2161
Johann Baudy69e3c752009-05-18 22:11:22 -07002162out_status:
2163 __packet_set_status(po, ph, status);
2164 kfree_skb(skb);
2165out_put:
Ben Greear827d9782011-06-01 07:18:53 +00002166 if (need_rls_dev)
2167 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002168out:
2169 mutex_unlock(&po->pg_vec_lock);
2170 return err;
2171}
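/*
 * Illustrative userspace sketch (not part of this file): driving
 * tpacket_snd() through a TPACKET_V2 TX ring.  The socket is assumed to be
 * already bound to an interface, with the ring set up and mmap()ed much
 * like the RX example above (PACKET_VERSION, then PACKET_TX_RING, then
 * mmap()); only the per-frame hand-off is shown and error handling is
 * omitted.
 *
 *	struct tpacket2_hdr *hdr = ring;	(first TX frame)
 *	unsigned char *data = (unsigned char *)hdr +
 *			      TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
 *
 *	(... build a complete frame of frame_len bytes at data ...)
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);		(kicks the do/while loop above)
 *
 * The data offset mirrors tpacket_fill_skb(): the payload starts
 * tp_hdrlen - sizeof(struct sockaddr_ll) bytes into the frame.  Once the
 * skb is freed, tpacket_destruct_skb() flips the slot back to
 * TP_STATUS_AVAILABLE.
 */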
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002173static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2174 size_t reserve, size_t len,
2175 size_t linear, int noblock,
2176 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002177{
2178 struct sk_buff *skb;
2179
2180 /* Under a page? Don't bother with paged skb. */
2181 if (prepad + len < PAGE_SIZE || !linear)
2182 linear = len;
2183
2184 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2185 err);
2186 if (!skb)
2187 return NULL;
2188
2189 skb_reserve(skb, reserve);
2190 skb_put(skb, linear);
2191 skb->data_len = len - linear;
2192 skb->len += len - linear;
2193
2194 return skb;
2195}
2196
Johann Baudy69e3c752009-05-18 22:11:22 -07002197static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 struct msghdr *msg, size_t len)
2199{
2200 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002201 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 struct sk_buff *skb;
2203 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002204 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002205 bool need_rls_dev = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002207 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002208 struct virtio_net_hdr vnet_hdr = { 0 };
2209 int offset = 0;
2210 int vnet_hdr_len;
2211 struct packet_sock *po = pkt_sk(sk);
2212 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002213 int hlen, tlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
2215 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002216 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002218
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002220 dev = po->prot_hook.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 proto = po->num;
2222 addr = NULL;
2223 } else {
2224 err = -EINVAL;
2225 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2226 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002227 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2228 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 proto = saddr->sll_protocol;
2230 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002231 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2232 need_rls_dev = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 }
2234
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 err = -ENXIO;
2236 if (dev == NULL)
2237 goto out_unlock;
2238 if (sock->type == SOCK_RAW)
2239 reserve = dev->hard_header_len;
2240
David S. Millerd5e76b02007-01-25 19:30:36 -08002241 err = -ENETDOWN;
2242 if (!(dev->flags & IFF_UP))
2243 goto out_unlock;
2244
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002245 if (po->has_vnet_hdr) {
2246 vnet_hdr_len = sizeof(vnet_hdr);
2247
2248 err = -EINVAL;
2249 if (len < vnet_hdr_len)
2250 goto out_unlock;
2251
2252 len -= vnet_hdr_len;
2253
2254 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2255 vnet_hdr_len);
2256 if (err < 0)
2257 goto out_unlock;
2258
2259 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2260 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2261 vnet_hdr.hdr_len))
2262 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2263 vnet_hdr.csum_offset + 2;
2264
2265 err = -EINVAL;
2266 if (vnet_hdr.hdr_len > len)
2267 goto out_unlock;
2268
2269 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2270 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2271 case VIRTIO_NET_HDR_GSO_TCPV4:
2272 gso_type = SKB_GSO_TCPV4;
2273 break;
2274 case VIRTIO_NET_HDR_GSO_TCPV6:
2275 gso_type = SKB_GSO_TCPV6;
2276 break;
2277 case VIRTIO_NET_HDR_GSO_UDP:
2278 gso_type = SKB_GSO_UDP;
2279 break;
2280 default:
2281 goto out_unlock;
2282 }
2283
2284 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2285 gso_type |= SKB_GSO_TCP_ECN;
2286
2287 if (vnet_hdr.gso_size == 0)
2288 goto out_unlock;
2289
2290 }
2291 }
2292
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 err = -EMSGSIZE;
Ben Greear57f89bf2011-02-11 09:35:18 +00002294 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 goto out_unlock;
2296
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002297 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002298 hlen = LL_RESERVED_SPACE(dev);
2299 tlen = dev->needed_tailroom;
2300 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002301 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002302 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 goto out_unlock;
2304
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002305 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002307 err = -EINVAL;
2308 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002309 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002310 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311
2312 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002313 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 if (err)
2315 goto out_free;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002316 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00002317 if (err < 0)
2318 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
Ben Greear57f89bf2011-02-11 09:35:18 +00002320 if (!gso_type && (len > dev->mtu + reserve)) {
2321 /* Earlier code assumed this would be a VLAN pkt,
2322 * double-check this now that we have the actual
2323 * packet in hand.
2324 */
2325 struct ethhdr *ehdr;
2326 skb_reset_mac_header(skb);
2327 ehdr = eth_hdr(skb);
2328 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2329 err = -EMSGSIZE;
2330 goto out_free;
2331 }
2332 }
2333
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 skb->protocol = proto;
2335 skb->dev = dev;
2336 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002337 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002339 if (po->has_vnet_hdr) {
2340 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2341 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2342 vnet_hdr.csum_offset)) {
2343 err = -EINVAL;
2344 goto out_free;
2345 }
2346 }
2347
2348 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2349 skb_shinfo(skb)->gso_type = gso_type;
2350
2351 /* Header must be checked, and gso_segs computed. */
2352 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2353 skb_shinfo(skb)->gso_segs = 0;
2354
2355 len += vnet_hdr_len;
2356 }
2357
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 /*
2359 * Now send it
2360 */
2361
2362 err = dev_queue_xmit(skb);
2363 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2364 goto out_unlock;
2365
Ben Greear827d9782011-06-01 07:18:53 +00002366 if (need_rls_dev)
2367 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002369 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370
2371out_free:
2372 kfree_skb(skb);
2373out_unlock:
Ben Greear827d9782011-06-01 07:18:53 +00002374 if (dev && need_rls_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 dev_put(dev);
2376out:
2377 return err;
2378}
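/*
 * Illustrative userspace sketch (not part of this file): the non-ring send
 * path above, driven by an ordinary sendto().  With SOCK_DGRAM the kernel
 * builds the link-layer header itself from the sockaddr_ll, via
 * dev_hard_header().  "eth0", dst_mac and the payload buffer are
 * placeholders and error handling is omitted; assumes <net/if.h>,
 * <linux/if_ether.h> and <linux/if_packet.h>.
 *
 *	int fd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family	 = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_ifindex	 = if_nametoindex("eth0");
 *	sll.sll_halen	 = ETH_ALEN;
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */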
2379
Johann Baudy69e3c752009-05-18 22:11:22 -07002380static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2381 struct msghdr *msg, size_t len)
2382{
Johann Baudy69e3c752009-05-18 22:11:22 -07002383 struct sock *sk = sock->sk;
2384 struct packet_sock *po = pkt_sk(sk);
2385 if (po->tx_ring.pg_vec)
2386 return tpacket_snd(po, msg);
2387 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002388 return packet_snd(sock, msg, len);
2389}
2390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391/*
2392 * Close a PACKET socket. This is fairly simple. We immediately go
2393 * to 'closed' state and remove our protocol entry in the device list.
2394 */
2395
2396static int packet_release(struct socket *sock)
2397{
2398 struct sock *sk = sock->sk;
2399 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002400 struct net *net;
chetan lokef6fb8f12011-08-19 10:18:16 +00002401 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
2403 if (!sk)
2404 return 0;
2405
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002406 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 po = pkt_sk(sk);
2408
stephen hemminger808f5112010-02-22 07:57:18 +00002409 spin_lock_bh(&net->packet.sklist_lock);
2410 sk_del_node_init_rcu(sk);
Eric Dumazet920de802008-11-24 00:09:29 -08002411 sock_prot_inuse_add(net, sk->sk_prot, -1);
stephen hemminger808f5112010-02-22 07:57:18 +00002412 spin_unlock_bh(&net->packet.sklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413
stephen hemminger808f5112010-02-22 07:57:18 +00002414 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002415 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002416 if (po->prot_hook.dev) {
2417 dev_put(po->prot_hook.dev);
2418 po->prot_hook.dev = NULL;
2419 }
stephen hemminger808f5112010-02-22 07:57:18 +00002420 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423
chetan lokef6fb8f12011-08-19 10:18:16 +00002424 memset(&req_u, 0, sizeof(req_u));
Johann Baudy69e3c752009-05-18 22:11:22 -07002425
2426 if (po->rx_ring.pg_vec)
chetan lokef6fb8f12011-08-19 10:18:16 +00002427 packet_set_ring(sk, &req_u, 1, 0);
Johann Baudy69e3c752009-05-18 22:11:22 -07002428
2429 if (po->tx_ring.pg_vec)
chetan lokef6fb8f12011-08-19 10:18:16 +00002430 packet_set_ring(sk, &req_u, 1, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431
David S. Millerdc99f602011-07-05 01:45:05 -07002432 fanout_release(sk);
2433
stephen hemminger808f5112010-02-22 07:57:18 +00002434 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 /*
2436 * Now the socket is dead. No more input will appear.
2437 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 sock_orphan(sk);
2439 sock->sk = NULL;
2440
2441 /* Purge queues */
2442
2443 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002444 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
2446 sock_put(sk);
2447 return 0;
2448}
2449
2450/*
2451 * Attach a packet hook.
2452 */
2453
Al Viro0e11c912006-11-08 00:26:29 -08002454static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455{
2456 struct packet_sock *po = pkt_sk(sk);
David S. Millerdc99f602011-07-05 01:45:05 -07002457
2458 if (po->fanout)
2459 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460
2461 lock_sock(sk);
2462
2463 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002464 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 po->num = protocol;
2466 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002467 if (po->prot_hook.dev)
2468 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 po->prot_hook.dev = dev;
2470
2471 po->ifindex = dev ? dev->ifindex : 0;
2472
2473 if (protocol == 0)
2474 goto out_unlock;
2475
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002476 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002477 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002478 } else {
2479 sk->sk_err = ENETDOWN;
2480 if (!sock_flag(sk, SOCK_DEAD))
2481 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 }
2483
2484out_unlock:
2485 spin_unlock(&po->bind_lock);
2486 release_sock(sk);
2487 return 0;
2488}
2489
2490/*
2491 * Bind a packet socket to a device
2492 */
2493
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002494static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2495 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002497 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 char name[15];
2499 struct net_device *dev;
2500 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002501
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 /*
2503 * Check legality
2504 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002505
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002506 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002508 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002510 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002511 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 return err;
2514}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
2516static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2517{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002518 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2519 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 struct net_device *dev = NULL;
2521 int err;
2522
2523
2524 /*
2525 * Check legality
2526 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002527
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 if (addr_len < sizeof(struct sockaddr_ll))
2529 return -EINVAL;
2530 if (sll->sll_family != AF_PACKET)
2531 return -EINVAL;
2532
2533 if (sll->sll_ifindex) {
2534 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002535 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 if (dev == NULL)
2537 goto out;
2538 }
2539 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540
2541out:
2542 return err;
2543}
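/*
 * Illustrative userspace sketch (not part of this file): creating a packet
 * socket (CAP_NET_RAW is required, see packet_create() below) and binding
 * it to a single interface with the sockaddr_ll form handled by
 * packet_bind().  "eth0" is only an example name; error handling is
 * omitted.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family	 = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex	 = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */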
2544
2545static struct proto packet_proto = {
2546 .name = "PACKET",
2547 .owner = THIS_MODULE,
2548 .obj_size = sizeof(struct packet_sock),
2549};
2550
2551/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002552 *	Create a packet socket (SOCK_RAW, SOCK_DGRAM or SOCK_PACKET).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 */
2554
Eric Paris3f378b62009-11-05 22:18:14 -08002555static int packet_create(struct net *net, struct socket *sock, int protocol,
2556 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557{
2558 struct sock *sk;
2559 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002560 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 int err;
2562
2563 if (!capable(CAP_NET_RAW))
2564 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002565 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2566 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 return -ESOCKTNOSUPPORT;
2568
2569 sock->state = SS_UNCONNECTED;
2570
2571 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002572 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 if (sk == NULL)
2574 goto out;
2575
2576 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 if (sock->type == SOCK_PACKET)
2578 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002579
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 sock_init_data(sock, sk);
2581
2582 po = pkt_sk(sk);
2583 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002584 po->num = proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585
2586 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002587 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
2589 /*
2590 * Attach a protocol block
2591 */
2592
2593 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002594 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 if (sock->type == SOCK_PACKET)
2598 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 po->prot_hook.af_packet_priv = sk;
2601
Al Viro0e11c912006-11-08 00:26:29 -08002602 if (proto) {
2603 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002604 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 }
2606
stephen hemminger808f5112010-02-22 07:57:18 +00002607 spin_lock_bh(&net->packet.sklist_lock);
2608 sk_add_node_rcu(sk, &net->packet.sklist);
Eric Dumazet36804532008-11-19 14:25:35 -08002609 sock_prot_inuse_add(net, &packet_proto, 1);
stephen hemminger808f5112010-02-22 07:57:18 +00002610 spin_unlock_bh(&net->packet.sklist_lock);
2611
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002612 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613out:
2614 return err;
2615}
2616
Richard Cochraned85b562010-04-07 22:41:28 +00002617static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2618{
2619 struct sock_exterr_skb *serr;
2620 struct sk_buff *skb, *skb2;
2621 int copied, err;
2622
2623 err = -EAGAIN;
2624 skb = skb_dequeue(&sk->sk_error_queue);
2625 if (skb == NULL)
2626 goto out;
2627
2628 copied = skb->len;
2629 if (copied > len) {
2630 msg->msg_flags |= MSG_TRUNC;
2631 copied = len;
2632 }
2633 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2634 if (err)
2635 goto out_free_skb;
2636
2637 sock_recv_timestamp(msg, sk, skb);
2638
2639 serr = SKB_EXT_ERR(skb);
2640 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2641 sizeof(serr->ee), &serr->ee);
2642
2643 msg->msg_flags |= MSG_ERRQUEUE;
2644 err = copied;
2645
2646 /* Reset and regenerate socket error */
2647 spin_lock_bh(&sk->sk_error_queue.lock);
2648 sk->sk_err = 0;
2649 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2650 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2651 spin_unlock_bh(&sk->sk_error_queue.lock);
2652 sk->sk_error_report(sk);
2653 } else
2654 spin_unlock_bh(&sk->sk_error_queue.lock);
2655
2656out_free_skb:
2657 kfree_skb(skb);
2658out:
2659 return err;
2660}
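/*
 * Illustrative userspace sketch (not part of this file): reading the TX
 * timestamps that packet_recv_error() queues on the error queue.
 * Timestamping is first requested with SO_TIMESTAMPING; after a send, the
 * timestamp comes back via recvmsg(MSG_ERRQUEUE) with a SOL_PACKET /
 * PACKET_TX_TIMESTAMP control message carrying the sock_extended_err and an
 * SCM_TIMESTAMPING control message carrying the timestamps themselves.
 * Error handling and the cmsg walk are omitted; assumes
 * <linux/net_tstamp.h> and <linux/errqueue.h>.
 *
 *	int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 *
 *	(... transmit a packet on fd ...)
 *
 *	char data[2048], ctrl[512];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 */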
2661
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662/*
2663 * Pull a packet from our receive queue and hand it to the user.
2664 * If necessary we block.
2665 */
2666
2667static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2668 struct msghdr *msg, size_t len, int flags)
2669{
2670 struct sock *sk = sock->sk;
2671 struct sk_buff *skb;
2672 int copied, err;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002673 struct sockaddr_ll *sll;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002674 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
2676 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002677 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 goto out;
2679
2680#if 0
2681 /* What error should we return now? EUNATTACH? */
2682 if (pkt_sk(sk)->ifindex < 0)
2683 return -ENODEV;
2684#endif
2685
Richard Cochraned85b562010-04-07 22:41:28 +00002686 if (flags & MSG_ERRQUEUE) {
2687 err = packet_recv_error(sk, msg, len);
2688 goto out;
2689 }
2690
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 * Call the generic datagram receiver. This handles all sorts
2693 * of horrible races and re-entrancy so we can forget about it
2694 * in the protocol layers.
2695 *
2696	 *	Now it will return ENETDOWN if the device has just gone down,
2697 * but then it will block.
2698 */
2699
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002700 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
2702 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002703	 *	An error occurred, so return it. skb_recv_datagram() handles
2704	 *	the blocking for us, so we don't have to see or worry about
2705	 *	blocking retries.
2706 */
2707
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002708 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 goto out;
2710
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002711 if (pkt_sk(sk)->has_vnet_hdr) {
2712 struct virtio_net_hdr vnet_hdr = { 0 };
2713
2714 err = -EINVAL;
2715 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002716 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002717 goto out_free;
2718
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002719 len -= vnet_hdr_len;
2720
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002721 if (skb_is_gso(skb)) {
2722 struct skb_shared_info *sinfo = skb_shinfo(skb);
2723
2724 /* This is a hint as to how much should be linear. */
2725 vnet_hdr.hdr_len = skb_headlen(skb);
2726 vnet_hdr.gso_size = sinfo->gso_size;
2727 if (sinfo->gso_type & SKB_GSO_TCPV4)
2728 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2729 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2730 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2731 else if (sinfo->gso_type & SKB_GSO_UDP)
2732 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2733 else if (sinfo->gso_type & SKB_GSO_FCOE)
2734 goto out_free;
2735 else
2736 BUG();
2737 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2738 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2739 } else
2740 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2741
2742 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2743 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002744 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002745 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002746 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2747 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002748 } /* else everything is zero */
2749
2750 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2751 vnet_hdr_len);
2752 if (err < 0)
2753 goto out_free;
2754 }
2755
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 /*
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002757 * If the address length field is there to be filled in, we fill
2758 * it in now.
2759 */
2760
Herbert Xuffbc6112007-02-04 23:33:10 -08002761 sll = &PACKET_SKB_CB(skb)->sa.ll;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002762 if (sock->type == SOCK_PACKET)
2763 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2764 else
2765 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2766
2767 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768	 *	You lose any data beyond the buffer you gave. If this worries a
2769	 *	user program, it can ask the device for its MTU anyway.
2770 */
2771
2772 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002773 if (copied > len) {
2774 copied = len;
2775 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 }
2777
2778 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2779 if (err)
2780 goto out_free;
2781
Neil Horman3b885782009-10-12 13:26:31 -07002782 sock_recv_ts_and_drops(msg, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783
2784 if (msg->msg_name)
Herbert Xuffbc6112007-02-04 23:33:10 -08002785 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2786 msg->msg_namelen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787
Herbert Xu8dc41942007-02-04 23:31:32 -08002788 if (pkt_sk(sk)->auxdata) {
Herbert Xuffbc6112007-02-04 23:33:10 -08002789 struct tpacket_auxdata aux;
2790
2791 aux.tp_status = TP_STATUS_USER;
2792 if (skb->ip_summed == CHECKSUM_PARTIAL)
2793 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2794 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2795 aux.tp_snaplen = skb->len;
2796 aux.tp_mac = 0;
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002797 aux.tp_net = skb_network_offset(skb);
Ben Greeara3bcc232011-06-01 06:49:10 +00002798 if (vlan_tx_tag_present(skb)) {
2799 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2800 aux.tp_status |= TP_STATUS_VLAN_VALID;
2801 } else {
2802 aux.tp_vlan_tci = 0;
2803 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07002804 aux.tp_padding = 0;
Herbert Xuffbc6112007-02-04 23:33:10 -08002805 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
Herbert Xu8dc41942007-02-04 23:31:32 -08002806 }
2807
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 /*
2809 * Free or return the buffer as appropriate. Again this
2810 * hides all the races and re-entrancy issues from us.
2811 */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002812 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813
2814out_free:
2815 skb_free_datagram(sk, skb);
2816out:
2817 return err;
2818}
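/*
 * Illustrative userspace sketch (not part of this file): retrieving the
 * tpacket_auxdata control message that packet_recvmsg() appends when
 * PACKET_AUXDATA is enabled, e.g. to recover the VLAN tag of a received
 * frame.  Error handling is omitted.
 *
 *	int one = 1;
 *	char buf[2048];
 *	char ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			(aux->tp_vlan_tci is valid when TP_STATUS_VLAN_VALID
 *			 is set in aux->tp_status)
 *		}
 *	}
 */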
2819
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2821 int *uaddr_len, int peer)
2822{
2823 struct net_device *dev;
2824 struct sock *sk = sock->sk;
2825
2826 if (peer)
2827 return -EOPNOTSUPP;
2828
2829 uaddr->sa_family = AF_PACKET;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002830 rcu_read_lock();
2831 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2832 if (dev)
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002833 strncpy(uaddr->sa_data, dev->name, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002834 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 memset(uaddr->sa_data, 0, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002836 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 *uaddr_len = sizeof(*uaddr);
2838
2839 return 0;
2840}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841
2842static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2843 int *uaddr_len, int peer)
2844{
2845 struct net_device *dev;
2846 struct sock *sk = sock->sk;
2847 struct packet_sock *po = pkt_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00002848 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849
2850 if (peer)
2851 return -EOPNOTSUPP;
2852
2853 sll->sll_family = AF_PACKET;
2854 sll->sll_ifindex = po->ifindex;
2855 sll->sll_protocol = po->num;
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002856 sll->sll_pkttype = 0;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002857 rcu_read_lock();
2858 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 if (dev) {
2860 sll->sll_hatype = dev->type;
2861 sll->sll_halen = dev->addr_len;
2862 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 } else {
2864 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2865 sll->sll_halen = 0;
2866 }
Eric Dumazet654d1f82009-11-02 10:43:32 +01002867 rcu_read_unlock();
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002868 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
2870 return 0;
2871}
2872
Wang Chen2aeb0b82008-07-14 20:49:46 -07002873static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2874 int what)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875{
2876 switch (i->type) {
2877 case PACKET_MR_MULTICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002878 if (i->alen != dev->addr_len)
2879 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 if (what > 0)
Jiri Pirko22bedad2010-04-01 21:22:57 +00002881 return dev_mc_add(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 else
Jiri Pirko22bedad2010-04-01 21:22:57 +00002883 return dev_mc_del(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 break;
2885 case PACKET_MR_PROMISC:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002886 return dev_set_promiscuity(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 break;
2888 case PACKET_MR_ALLMULTI:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002889 return dev_set_allmulti(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 break;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002891 case PACKET_MR_UNICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002892 if (i->alen != dev->addr_len)
2893 return -EINVAL;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002894 if (what > 0)
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002895 return dev_uc_add(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002896 else
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002897 return dev_uc_del(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002898 break;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002899 default:
2900 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 }
Wang Chen2aeb0b82008-07-14 20:49:46 -07002902 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903}
2904
2905static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2906{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002907 for ( ; i; i = i->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 if (i->ifindex == dev->ifindex)
2909 packet_dev_mc(dev, i, what);
2910 }
2911}
2912
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002913static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914{
2915 struct packet_sock *po = pkt_sk(sk);
2916 struct packet_mclist *ml, *i;
2917 struct net_device *dev;
2918 int err;
2919
2920 rtnl_lock();
2921
2922 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002923 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 if (!dev)
2925 goto done;
2926
2927 err = -EINVAL;
Jiri Pirko11625632010-03-02 20:40:01 +00002928 if (mreq->mr_alen > dev->addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 goto done;
2930
2931 err = -ENOBUFS;
Kris Katterjohn8b3a7002006-01-11 15:56:43 -08002932 i = kmalloc(sizeof(*i), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 if (i == NULL)
2934 goto done;
2935
2936 err = 0;
2937 for (ml = po->mclist; ml; ml = ml->next) {
2938 if (ml->ifindex == mreq->mr_ifindex &&
2939 ml->type == mreq->mr_type &&
2940 ml->alen == mreq->mr_alen &&
2941 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2942 ml->count++;
2943 /* Free the new element ... */
2944 kfree(i);
2945 goto done;
2946 }
2947 }
2948
2949 i->type = mreq->mr_type;
2950 i->ifindex = mreq->mr_ifindex;
2951 i->alen = mreq->mr_alen;
2952 memcpy(i->addr, mreq->mr_address, i->alen);
2953 i->count = 1;
2954 i->next = po->mclist;
2955 po->mclist = i;
Wang Chen2aeb0b82008-07-14 20:49:46 -07002956 err = packet_dev_mc(dev, i, 1);
2957 if (err) {
2958 po->mclist = i->next;
2959 kfree(i);
2960 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961
2962done:
2963 rtnl_unlock();
2964 return err;
2965}
2966
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002967static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968{
2969 struct packet_mclist *ml, **mlp;
2970
2971 rtnl_lock();
2972
2973 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2974 if (ml->ifindex == mreq->mr_ifindex &&
2975 ml->type == mreq->mr_type &&
2976 ml->alen == mreq->mr_alen &&
2977 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2978 if (--ml->count == 0) {
2979 struct net_device *dev;
2980 *mlp = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00002981 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2982 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 kfree(ml);
2985 }
2986 rtnl_unlock();
2987 return 0;
2988 }
2989 }
2990 rtnl_unlock();
2991 return -EADDRNOTAVAIL;
2992}
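
/*
 * Illustrative userspace sketch (not kernel code) of the membership API
 * handled by packet_mc_add()/packet_mc_drop() above.  PACKET_MR_PROMISC
 * takes a reference on the device's promiscuity count, which is dropped
 * again on PACKET_DROP_MEMBERSHIP or when the socket is released:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,		// e.g. from if_nametoindex()
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *	...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
 */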
2993
2994static void packet_flush_mclist(struct sock *sk)
2995{
2996 struct packet_sock *po = pkt_sk(sk);
2997 struct packet_mclist *ml;
2998
2999 if (!po->mclist)
3000 return;
3001
3002 rtnl_lock();
3003 while ((ml = po->mclist) != NULL) {
3004 struct net_device *dev;
3005
3006 po->mclist = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003007 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3008 if (dev != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 kfree(ml);
3011 }
3012 rtnl_unlock();
3013}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014
3015static int
David S. Millerb7058842009-09-30 16:12:20 -07003016packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017{
3018 struct sock *sk = sock->sk;
Herbert Xu8dc41942007-02-04 23:31:32 -08003019 struct packet_sock *po = pkt_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 int ret;
3021
3022 if (level != SOL_PACKET)
3023 return -ENOPROTOOPT;
3024
Johann Baudy69e3c752009-05-18 22:11:22 -07003025 switch (optname) {
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003026 case PACKET_ADD_MEMBERSHIP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 case PACKET_DROP_MEMBERSHIP:
3028 {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003029 struct packet_mreq_max mreq;
3030 int len = optlen;
3031 memset(&mreq, 0, sizeof(mreq));
3032 if (len < sizeof(struct packet_mreq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 return -EINVAL;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003034 if (len > sizeof(mreq))
3035 len = sizeof(mreq);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003036 if (copy_from_user(&mreq, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 return -EFAULT;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003038 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3039 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 if (optname == PACKET_ADD_MEMBERSHIP)
3041 ret = packet_mc_add(sk, &mreq);
3042 else
3043 ret = packet_mc_drop(sk, &mreq);
3044 return ret;
3045 }
David S. Millera2efcfa2007-05-29 13:12:50 -07003046
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 case PACKET_RX_RING:
Johann Baudy69e3c752009-05-18 22:11:22 -07003048 case PACKET_TX_RING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 {
chetan lokef6fb8f12011-08-19 10:18:16 +00003050 union tpacket_req_u req_u;
3051 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
chetan lokef6fb8f12011-08-19 10:18:16 +00003053 switch (po->tp_version) {
3054 case TPACKET_V1:
3055 case TPACKET_V2:
3056 len = sizeof(req_u.req);
3057 break;
3058 case TPACKET_V3:
3059 default:
3060 len = sizeof(req_u.req3);
3061 break;
3062 }
3063 if (optlen < len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 return -EINVAL;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003065 if (pkt_sk(sk)->has_vnet_hdr)
3066 return -EINVAL;
chetan lokef6fb8f12011-08-19 10:18:16 +00003067 if (copy_from_user(&req_u.req, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 return -EFAULT;
chetan lokef6fb8f12011-08-19 10:18:16 +00003069 return packet_set_ring(sk, &req_u, 0,
3070 optname == PACKET_TX_RING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 }
3072 case PACKET_COPY_THRESH:
3073 {
3074 int val;
3075
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003076 if (optlen != sizeof(val))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003078 if (copy_from_user(&val, optval, sizeof(val)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079 return -EFAULT;
3080
3081 pkt_sk(sk)->copy_thresh = val;
3082 return 0;
3083 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003084 case PACKET_VERSION:
3085 {
3086 int val;
3087
3088 if (optlen != sizeof(val))
3089 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003090 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003091 return -EBUSY;
3092 if (copy_from_user(&val, optval, sizeof(val)))
3093 return -EFAULT;
3094 switch (val) {
3095 case TPACKET_V1:
3096 case TPACKET_V2:
chetan lokef6fb8f12011-08-19 10:18:16 +00003097 case TPACKET_V3:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003098 po->tp_version = val;
3099 return 0;
3100 default:
3101 return -EINVAL;
3102 }
3103 }
Patrick McHardy89133362008-07-18 18:05:19 -07003104 case PACKET_RESERVE:
3105 {
3106 unsigned int val;
3107
3108 if (optlen != sizeof(val))
3109 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003110 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardy89133362008-07-18 18:05:19 -07003111 return -EBUSY;
3112 if (copy_from_user(&val, optval, sizeof(val)))
3113 return -EFAULT;
3114 po->tp_reserve = val;
3115 return 0;
3116 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003117 case PACKET_LOSS:
3118 {
3119 unsigned int val;
3120
3121 if (optlen != sizeof(val))
3122 return -EINVAL;
3123 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3124 return -EBUSY;
3125 if (copy_from_user(&val, optval, sizeof(val)))
3126 return -EFAULT;
3127 po->tp_loss = !!val;
3128 return 0;
3129 }
Herbert Xu8dc41942007-02-04 23:31:32 -08003130 case PACKET_AUXDATA:
3131 {
3132 int val;
3133
3134 if (optlen < sizeof(val))
3135 return -EINVAL;
3136 if (copy_from_user(&val, optval, sizeof(val)))
3137 return -EFAULT;
3138
3139 po->auxdata = !!val;
3140 return 0;
3141 }
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003142 case PACKET_ORIGDEV:
3143 {
3144 int val;
3145
3146 if (optlen < sizeof(val))
3147 return -EINVAL;
3148 if (copy_from_user(&val, optval, sizeof(val)))
3149 return -EFAULT;
3150
3151 po->origdev = !!val;
3152 return 0;
3153 }
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003154 case PACKET_VNET_HDR:
3155 {
3156 int val;
3157
3158 if (sock->type != SOCK_RAW)
3159 return -EINVAL;
3160 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3161 return -EBUSY;
3162 if (optlen < sizeof(val))
3163 return -EINVAL;
3164 if (copy_from_user(&val, optval, sizeof(val)))
3165 return -EFAULT;
3166
3167 po->has_vnet_hdr = !!val;
3168 return 0;
3169 }
Scott McMillan614f60f2010-06-02 05:53:56 -07003170 case PACKET_TIMESTAMP:
3171 {
3172 int val;
3173
3174 if (optlen != sizeof(val))
3175 return -EINVAL;
3176 if (copy_from_user(&val, optval, sizeof(val)))
3177 return -EFAULT;
3178
3179 po->tp_tstamp = val;
3180 return 0;
3181 }
David S. Millerdc99f602011-07-05 01:45:05 -07003182 case PACKET_FANOUT:
3183 {
3184 int val;
3185
3186 if (optlen != sizeof(val))
3187 return -EINVAL;
3188 if (copy_from_user(&val, optval, sizeof(val)))
3189 return -EFAULT;
3190
3191 return fanout_add(sk, val & 0xffff, val >> 16);
3192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 default:
3194 return -ENOPROTOOPT;
3195 }
3196}
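
/*
 * Illustrative userspace sketch (not kernel code) for the option handling
 * above.  PACKET_VERSION has to be set before PACKET_RX_RING, since
 * packet_set_ring() uses po->tp_version to pick the header length (and, for
 * TPACKET_V3, the block descriptor setup) and refuses to change the version
 * once a ring exists:
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,			// multiple of PAGE_SIZE
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,			// TPACKET_ALIGNMENT aligned
 *		.tp_frame_nr   = (4096 / 2048) * 64,	// frames_per_block * block_nr
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */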
3197
3198static int packet_getsockopt(struct socket *sock, int level, int optname,
3199 char __user *optval, int __user *optlen)
3200{
3201 int len;
Herbert Xu8dc41942007-02-04 23:31:32 -08003202 int val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 struct sock *sk = sock->sk;
3204 struct packet_sock *po = pkt_sk(sk);
Herbert Xu8dc41942007-02-04 23:31:32 -08003205 void *data;
3206 struct tpacket_stats st;
chetan lokef6fb8f12011-08-19 10:18:16 +00003207 union tpacket_stats_u st_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208
3209 if (level != SOL_PACKET)
3210 return -ENOPROTOOPT;
3211
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003212 if (get_user(len, optlen))
3213 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214
3215 if (len < 0)
3216 return -EINVAL;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003217
Johann Baudy69e3c752009-05-18 22:11:22 -07003218 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 case PACKET_STATISTICS:
chetan lokef6fb8f12011-08-19 10:18:16 +00003220 if (po->tp_version == TPACKET_V3) {
3221 len = sizeof(struct tpacket_stats_v3);
3222 } else {
3223 if (len > sizeof(struct tpacket_stats))
3224 len = sizeof(struct tpacket_stats);
3225 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226 spin_lock_bh(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003227 if (po->tp_version == TPACKET_V3) {
3228 memcpy(&st_u.stats3, &po->stats,
3229 sizeof(struct tpacket_stats));
3230 st_u.stats3.tp_freeze_q_cnt =
3231 po->stats_u.stats3.tp_freeze_q_cnt;
3232 st_u.stats3.tp_packets += po->stats.tp_drops;
3233 data = &st_u.stats3;
3234 } else {
3235 st = po->stats;
3236 st.tp_packets += st.tp_drops;
3237 data = &st;
3238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 memset(&po->stats, 0, sizeof(st));
3240 spin_unlock_bh(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 break;
Herbert Xu8dc41942007-02-04 23:31:32 -08003242 case PACKET_AUXDATA:
3243 if (len > sizeof(int))
3244 len = sizeof(int);
3245 val = po->auxdata;
3246
3247 data = &val;
3248 break;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003249 case PACKET_ORIGDEV:
3250 if (len > sizeof(int))
3251 len = sizeof(int);
3252 val = po->origdev;
3253
3254 data = &val;
3255 break;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003256 case PACKET_VNET_HDR:
3257 if (len > sizeof(int))
3258 len = sizeof(int);
3259 val = po->has_vnet_hdr;
3260
3261 data = &val;
3262 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003263 case PACKET_VERSION:
3264 if (len > sizeof(int))
3265 len = sizeof(int);
3266 val = po->tp_version;
3267 data = &val;
3268 break;
3269 case PACKET_HDRLEN:
3270 if (len > sizeof(int))
3271 len = sizeof(int);
3272 if (copy_from_user(&val, optval, len))
3273 return -EFAULT;
3274 switch (val) {
3275 case TPACKET_V1:
3276 val = sizeof(struct tpacket_hdr);
3277 break;
3278 case TPACKET_V2:
3279 val = sizeof(struct tpacket2_hdr);
3280 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003281 case TPACKET_V3:
3282 val = sizeof(struct tpacket3_hdr);
3283 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003284 default:
3285 return -EINVAL;
3286 }
3287 data = &val;
3288 break;
Patrick McHardy89133362008-07-18 18:05:19 -07003289 case PACKET_RESERVE:
3290 if (len > sizeof(unsigned int))
3291 len = sizeof(unsigned int);
3292 val = po->tp_reserve;
3293 data = &val;
3294 break;
Johann Baudy69e3c752009-05-18 22:11:22 -07003295 case PACKET_LOSS:
3296 if (len > sizeof(unsigned int))
3297 len = sizeof(unsigned int);
3298 val = po->tp_loss;
3299 data = &val;
3300 break;
Scott McMillan614f60f2010-06-02 05:53:56 -07003301 case PACKET_TIMESTAMP:
3302 if (len > sizeof(int))
3303 len = sizeof(int);
3304 val = po->tp_tstamp;
3305 data = &val;
3306 break;
David S. Millerdc99f602011-07-05 01:45:05 -07003307 case PACKET_FANOUT:
3308 if (len > sizeof(int))
3309 len = sizeof(int);
3310 val = (po->fanout ?
3311 ((u32)po->fanout->id |
3312 ((u32)po->fanout->type << 16)) :
3313 0);
3314 data = &val;
3315 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 default:
3317 return -ENOPROTOOPT;
3318 }
3319
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003320 if (put_user(len, optlen))
3321 return -EFAULT;
Herbert Xu8dc41942007-02-04 23:31:32 -08003322 if (copy_to_user(optval, data, len))
3323 return -EFAULT;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003324 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325}
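
/*
 * Illustrative userspace sketch (not kernel code) for the getsockopt path
 * above.  PACKET_STATISTICS is read-and-clear: the counters are copied and
 * then zeroed under the receive-queue lock, and tp_packets already has
 * tp_drops folded in:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("received %u, dropped %u\n", st.tp_packets, st.tp_drops);
 */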
3326
3327
3328static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3329{
3330 struct sock *sk;
3331 struct hlist_node *node;
Jason Lunzad930652007-02-20 23:19:54 -08003332 struct net_device *dev = data;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003333 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
stephen hemminger808f5112010-02-22 07:57:18 +00003335 rcu_read_lock();
3336 sk_for_each_rcu(sk, node, &net->packet.sklist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 struct packet_sock *po = pkt_sk(sk);
3338
3339 switch (msg) {
3340 case NETDEV_UNREGISTER:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 if (po->mclist)
3342 packet_dev_mclist(dev, po->mclist, -1);
David S. Millera2efcfa2007-05-29 13:12:50 -07003343 /* fallthrough */
3344
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 case NETDEV_DOWN:
3346 if (dev->ifindex == po->ifindex) {
3347 spin_lock(&po->bind_lock);
3348 if (po->running) {
David S. Millerce06b032011-07-04 01:44:29 -07003349 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 sk->sk_err = ENETDOWN;
3351 if (!sock_flag(sk, SOCK_DEAD))
3352 sk->sk_error_report(sk);
3353 }
3354 if (msg == NETDEV_UNREGISTER) {
3355 po->ifindex = -1;
Ben Greear160ff182011-06-01 07:18:52 +00003356 if (po->prot_hook.dev)
3357 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358 po->prot_hook.dev = NULL;
3359 }
3360 spin_unlock(&po->bind_lock);
3361 }
3362 break;
3363 case NETDEV_UP:
stephen hemminger808f5112010-02-22 07:57:18 +00003364 if (dev->ifindex == po->ifindex) {
3365 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003366 if (po->num)
3367 register_prot_hook(sk);
stephen hemminger808f5112010-02-22 07:57:18 +00003368 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 break;
3371 }
3372 }
stephen hemminger808f5112010-02-22 07:57:18 +00003373 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 return NOTIFY_DONE;
3375}
3376
3377
3378static int packet_ioctl(struct socket *sock, unsigned int cmd,
3379 unsigned long arg)
3380{
3381 struct sock *sk = sock->sk;
3382
Johann Baudy69e3c752009-05-18 22:11:22 -07003383 switch (cmd) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003384 case SIOCOUTQ:
3385 {
3386 int amount = sk_wmem_alloc_get(sk);
Eric Dumazet31e6d362009-06-17 19:05:41 -07003387
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003388 return put_user(amount, (int __user *)arg);
3389 }
3390 case SIOCINQ:
3391 {
3392 struct sk_buff *skb;
3393 int amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003395 spin_lock_bh(&sk->sk_receive_queue.lock);
3396 skb = skb_peek(&sk->sk_receive_queue);
3397 if (skb)
3398 amount = skb->len;
3399 spin_unlock_bh(&sk->sk_receive_queue.lock);
3400 return put_user(amount, (int __user *)arg);
3401 }
3402 case SIOCGSTAMP:
3403 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3404 case SIOCGSTAMPNS:
3405 return sock_get_timestampns(sk, (struct timespec __user *)arg);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407#ifdef CONFIG_INET
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003408 case SIOCADDRT:
3409 case SIOCDELRT:
3410 case SIOCDARP:
3411 case SIOCGARP:
3412 case SIOCSARP:
3413 case SIOCGIFADDR:
3414 case SIOCSIFADDR:
3415 case SIOCGIFBRDADDR:
3416 case SIOCSIFBRDADDR:
3417 case SIOCGIFNETMASK:
3418 case SIOCSIFNETMASK:
3419 case SIOCGIFDSTADDR:
3420 case SIOCSIFDSTADDR:
3421 case SIOCSIFFLAGS:
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003422 return inet_dgram_ops.ioctl(sock, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423#endif
3424
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003425 default:
3426 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 }
3428 return 0;
3429}
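
/*
 * Illustrative userspace sketch (not kernel code) for the ioctls above.
 * SIOCINQ reports the length of the next packet queued for reading (not the
 * total queue size), SIOCOUTQ the transmit memory still accounted to the
 * socket:
 *
 *	int next_len = 0, unsent = 0;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */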
3430
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003431static unsigned int packet_poll(struct file *file, struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 poll_table *wait)
3433{
3434 struct sock *sk = sock->sk;
3435 struct packet_sock *po = pkt_sk(sk);
3436 unsigned int mask = datagram_poll(file, sock, wait);
3437
3438 spin_lock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003439 if (po->rx_ring.pg_vec) {
chetan lokef6fb8f12011-08-19 10:18:16 +00003440 if (!packet_previous_rx_frame(po, &po->rx_ring,
3441 TP_STATUS_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 mask |= POLLIN | POLLRDNORM;
3443 }
3444 spin_unlock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003445 spin_lock_bh(&sk->sk_write_queue.lock);
3446 if (po->tx_ring.pg_vec) {
3447 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3448 mask |= POLLOUT | POLLWRNORM;
3449 }
3450 spin_unlock_bh(&sk->sk_write_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451 return mask;
3452}
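
/*
 * Illustrative userspace sketch (not kernel code) of a poll() loop matching
 * the logic above: POLLIN is reported once at least one RX ring frame has
 * been handed to user space, POLLOUT while the current TX ring slot is
 * still TP_STATUS_AVAILABLE:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// walk the mapped RX ring until a TP_STATUS_KERNEL frame
 *		// is reached, handing frames back as they are consumed
 *	}
 */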
3453
3454
3455/* Dirty? Well, I still have not learned a better way to account
3456 * for user mmaps.
3457 */
3458
3459static void packet_mm_open(struct vm_area_struct *vma)
3460{
3461 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003462 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 if (sk)
3466 atomic_inc(&pkt_sk(sk)->mapped);
3467}
3468
3469static void packet_mm_close(struct vm_area_struct *vma)
3470{
3471 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003472 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003474
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 if (sk)
3476 atomic_dec(&pkt_sk(sk)->mapped);
3477}
3478
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003479static const struct vm_operations_struct packet_mmap_ops = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003480 .open = packet_mm_open,
3481 .close = packet_mm_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482};
3483
Neil Horman0e3125c2010-11-16 10:26:47 -08003484static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3485 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486{
3487 int i;
3488
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003489 for (i = 0; i < len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003490 if (likely(pg_vec[i].buffer)) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003491 if (is_vmalloc_addr(pg_vec[i].buffer))
Neil Horman0e3125c2010-11-16 10:26:47 -08003492 vfree(pg_vec[i].buffer);
3493 else
3494 free_pages((unsigned long)pg_vec[i].buffer,
3495 order);
3496 pg_vec[i].buffer = NULL;
3497 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 }
3499 kfree(pg_vec);
3500}
3501
Olof Johanssoneea49cc92011-11-02 11:00:49 +00003502static char *alloc_one_pg_vec_page(unsigned long order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003503{
Neil Horman0e3125c2010-11-16 10:26:47 -08003504 char *buffer = NULL;
3505 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3506 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
Eric Dumazet719bfea2009-04-15 03:39:52 -07003507
Neil Horman0e3125c2010-11-16 10:26:47 -08003508 buffer = (char *) __get_free_pages(gfp_flags, order);
3509
3510 if (buffer)
3511 return buffer;
3512
3513 /*
3514 * __get_free_pages failed, fall back to vmalloc
3515 */
Eric Dumazetbbce5a52010-11-20 07:31:54 +00003516 buffer = vzalloc((1 << order) * PAGE_SIZE);
Neil Horman0e3125c2010-11-16 10:26:47 -08003517
3518 if (buffer)
3519 return buffer;
3520
3521 /*
3522 * vmalloc failed, let's dig into swap here
3523 */
Neil Horman0e3125c2010-11-16 10:26:47 -08003524 gfp_flags &= ~__GFP_NORETRY;
3525 buffer = (char *)__get_free_pages(gfp_flags, order);
3526 if (buffer)
3527 return buffer;
3528
3529 /*
3530 * complete and utter failure
3531 */
3532 return NULL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003533}
3534
Neil Horman0e3125c2010-11-16 10:26:47 -08003535static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003536{
3537 unsigned int block_nr = req->tp_block_nr;
Neil Horman0e3125c2010-11-16 10:26:47 -08003538 struct pgv *pg_vec;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003539 int i;
3540
Neil Horman0e3125c2010-11-16 10:26:47 -08003541 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003542 if (unlikely(!pg_vec))
3543 goto out;
3544
3545 for (i = 0; i < block_nr; i++) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003546 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
Neil Horman0e3125c2010-11-16 10:26:47 -08003547 if (unlikely(!pg_vec[i].buffer))
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003548 goto out_free_pgvec;
3549 }
3550
3551out:
3552 return pg_vec;
3553
3554out_free_pgvec:
3555 free_pg_vec(pg_vec, order, block_nr);
3556 pg_vec = NULL;
3557 goto out;
3558}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559
chetan lokef6fb8f12011-08-19 10:18:16 +00003560static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -07003561 int closing, int tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562{
Neil Horman0e3125c2010-11-16 10:26:47 -08003563 struct pgv *pg_vec = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 struct packet_sock *po = pkt_sk(sk);
Al Viro0e11c912006-11-08 00:26:29 -08003565 int was_running, order = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003566 struct packet_ring_buffer *rb;
3567 struct sk_buff_head *rb_queue;
Al Viro0e11c912006-11-08 00:26:29 -08003568 __be16 num;
chetan lokef6fb8f12011-08-19 10:18:16 +00003569 int err = -EINVAL;
3570 /* Added to keep code churn to a minimum */
3571 struct tpacket_req *req = &req_u->req;
3572
3573 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3574 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3575 WARN(1, "Tx-ring is not supported.\n");
3576 goto out;
3577 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003578
3579 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3580 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3581
3582 err = -EBUSY;
3583 if (!closing) {
3584 if (atomic_read(&po->mapped))
3585 goto out;
3586 if (atomic_read(&rb->pending))
3587 goto out;
3588 }
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003589
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 if (req->tp_block_nr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591 /* Sanity tests and some calculations */
Johann Baudy69e3c752009-05-18 22:11:22 -07003592 err = -EBUSY;
3593 if (unlikely(rb->pg_vec))
3594 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003596 switch (po->tp_version) {
3597 case TPACKET_V1:
3598 po->tp_hdrlen = TPACKET_HDRLEN;
3599 break;
3600 case TPACKET_V2:
3601 po->tp_hdrlen = TPACKET2_HDRLEN;
3602 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003603 case TPACKET_V3:
3604 po->tp_hdrlen = TPACKET3_HDRLEN;
3605 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003606 }
3607
Johann Baudy69e3c752009-05-18 22:11:22 -07003608 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003609 if (unlikely((int)req->tp_block_size <= 0))
Johann Baudy69e3c752009-05-18 22:11:22 -07003610 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003611 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003612 goto out;
Patrick McHardy89133362008-07-18 18:05:19 -07003613 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
Johann Baudy69e3c752009-05-18 22:11:22 -07003614 po->tp_reserve))
3615 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003616 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003617 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618
Johann Baudy69e3c752009-05-18 22:11:22 -07003619 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3620 if (unlikely(rb->frames_per_block <= 0))
3621 goto out;
3622 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3623 req->tp_frame_nr))
3624 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625
3626 err = -ENOMEM;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003627 order = get_order(req->tp_block_size);
3628 pg_vec = alloc_pg_vec(req, order);
3629 if (unlikely(!pg_vec))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630 goto out;
chetan lokef6fb8f12011-08-19 10:18:16 +00003631 switch (po->tp_version) {
3632 case TPACKET_V3:
3633 /* Transmit path is not supported. We checked
3634 * it above but just being paranoid
3635 */
3636 if (!tx_ring)
3637 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3638 break;
3639 default:
3640 break;
3641 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003642 }
3643 /* Done */
3644 else {
3645 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003646 if (unlikely(req->tp_frame_nr))
Johann Baudy69e3c752009-05-18 22:11:22 -07003647 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 }
3649
3650 lock_sock(sk);
3651
3652 /* Detach socket from network */
3653 spin_lock(&po->bind_lock);
3654 was_running = po->running;
3655 num = po->num;
3656 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657 po->num = 0;
David S. Millerce06b032011-07-04 01:44:29 -07003658 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659 }
3660 spin_unlock(&po->bind_lock);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003661
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662 synchronize_net();
3663
3664 err = -EBUSY;
Herbert Xu905db442009-01-30 14:12:06 -08003665 mutex_lock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 if (closing || atomic_read(&po->mapped) == 0) {
3667 err = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003668 spin_lock_bh(&rb_queue->lock);
Changli Gaoc053fd92010-12-10 16:02:20 -08003669 swap(rb->pg_vec, pg_vec);
Johann Baudy69e3c752009-05-18 22:11:22 -07003670 rb->frame_max = (req->tp_frame_nr - 1);
3671 rb->head = 0;
3672 rb->frame_size = req->tp_frame_size;
3673 spin_unlock_bh(&rb_queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674
Changli Gaoc053fd92010-12-10 16:02:20 -08003675 swap(rb->pg_vec_order, order);
3676 swap(rb->pg_vec_len, req->tp_block_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677
Johann Baudy69e3c752009-05-18 22:11:22 -07003678 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3679 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3680 tpacket_rcv : packet_rcv;
3681 skb_queue_purge(rb_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 if (atomic_read(&po->mapped))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003683 pr_err("packet_mmap: vma is busy: %d\n",
3684 atomic_read(&po->mapped));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 }
Herbert Xu905db442009-01-30 14:12:06 -08003686 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687
3688 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003689 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 po->num = num;
David S. Millerce06b032011-07-04 01:44:29 -07003691 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692 }
3693 spin_unlock(&po->bind_lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003694 if (closing && (po->tp_version > TPACKET_V2)) {
3695 /* Because we don't support block-based V3 on tx-ring */
3696 if (!tx_ring)
3697 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 release_sock(sk);
3700
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701 if (pg_vec)
3702 free_pg_vec(pg_vec, order, req->tp_block_nr);
3703out:
3704 return err;
3705}
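
/*
 * A worked example of the sizing rules checked above (illustrative numbers,
 * assuming 4 KiB pages): tp_block_size must be a page multiple,
 * tp_frame_size must be TPACKET_ALIGNMENT aligned and at least
 * tp_hdrlen + tp_reserve, and tp_frame_nr must equal
 * frames_per_block * tp_block_nr.  So with
 *
 *	tp_block_size = 8192	(2 pages)
 *	tp_frame_size = 2048	-> frames_per_block = 8192 / 2048 = 4
 *	tp_block_nr   = 32	-> tp_frame_nr must be 4 * 32 = 128
 *
 * any other tp_frame_nr is rejected with -EINVAL.
 */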
3706
Johann Baudy69e3c752009-05-18 22:11:22 -07003707static int packet_mmap(struct file *file, struct socket *sock,
3708 struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709{
3710 struct sock *sk = sock->sk;
3711 struct packet_sock *po = pkt_sk(sk);
Johann Baudy69e3c752009-05-18 22:11:22 -07003712 unsigned long size, expected_size;
3713 struct packet_ring_buffer *rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 unsigned long start;
3715 int err = -EINVAL;
3716 int i;
3717
3718 if (vma->vm_pgoff)
3719 return -EINVAL;
3720
Herbert Xu905db442009-01-30 14:12:06 -08003721 mutex_lock(&po->pg_vec_lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003722
3723 expected_size = 0;
3724 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3725 if (rb->pg_vec) {
3726 expected_size += rb->pg_vec_len
3727 * rb->pg_vec_pages
3728 * PAGE_SIZE;
3729 }
3730 }
3731
3732 if (expected_size == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003734
3735 size = vma->vm_end - vma->vm_start;
3736 if (size != expected_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 goto out;
3738
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 start = vma->vm_start;
Johann Baudy69e3c752009-05-18 22:11:22 -07003740 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3741 if (rb->pg_vec == NULL)
3742 continue;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003743
Johann Baudy69e3c752009-05-18 22:11:22 -07003744 for (i = 0; i < rb->pg_vec_len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003745 struct page *page;
3746 void *kaddr = rb->pg_vec[i].buffer;
Johann Baudy69e3c752009-05-18 22:11:22 -07003747 int pg_num;
3748
Changli Gaoc56b4d92010-12-01 02:52:57 +00003749 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3750 page = pgv_to_page(kaddr);
Johann Baudy69e3c752009-05-18 22:11:22 -07003751 err = vm_insert_page(vma, start, page);
3752 if (unlikely(err))
3753 goto out;
3754 start += PAGE_SIZE;
Neil Horman0e3125c2010-11-16 10:26:47 -08003755 kaddr += PAGE_SIZE;
Johann Baudy69e3c752009-05-18 22:11:22 -07003756 }
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003757 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003759
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003760 atomic_inc(&po->mapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761 vma->vm_ops = &packet_mmap_ops;
3762 err = 0;
3763
3764out:
Herbert Xu905db442009-01-30 14:12:06 -08003765 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 return err;
3767}
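
/*
 * Illustrative userspace sketch (not kernel code) of mapping the rings set
 * up above.  Both rings share a single mapping (RX pages first, then TX),
 * vm_pgoff must be 0, and the requested length has to match the combined
 * ring size exactly:
 *
 *	size_t sz = (size_t)rx_req.tp_block_size * rx_req.tp_block_nr +
 *		    (size_t)tx_req.tp_block_size * tx_req.tp_block_nr;
 *	void *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * (Drop the tx_req term if no TX ring was configured.)
 */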
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003769static const struct proto_ops packet_ops_spkt = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 .family = PF_PACKET,
3771 .owner = THIS_MODULE,
3772 .release = packet_release,
3773 .bind = packet_bind_spkt,
3774 .connect = sock_no_connect,
3775 .socketpair = sock_no_socketpair,
3776 .accept = sock_no_accept,
3777 .getname = packet_getname_spkt,
3778 .poll = datagram_poll,
3779 .ioctl = packet_ioctl,
3780 .listen = sock_no_listen,
3781 .shutdown = sock_no_shutdown,
3782 .setsockopt = sock_no_setsockopt,
3783 .getsockopt = sock_no_getsockopt,
3784 .sendmsg = packet_sendmsg_spkt,
3785 .recvmsg = packet_recvmsg,
3786 .mmap = sock_no_mmap,
3787 .sendpage = sock_no_sendpage,
3788};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003790static const struct proto_ops packet_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791 .family = PF_PACKET,
3792 .owner = THIS_MODULE,
3793 .release = packet_release,
3794 .bind = packet_bind,
3795 .connect = sock_no_connect,
3796 .socketpair = sock_no_socketpair,
3797 .accept = sock_no_accept,
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003798 .getname = packet_getname,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 .poll = packet_poll,
3800 .ioctl = packet_ioctl,
3801 .listen = sock_no_listen,
3802 .shutdown = sock_no_shutdown,
3803 .setsockopt = packet_setsockopt,
3804 .getsockopt = packet_getsockopt,
3805 .sendmsg = packet_sendmsg,
3806 .recvmsg = packet_recvmsg,
3807 .mmap = packet_mmap,
3808 .sendpage = sock_no_sendpage,
3809};
3810
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003811static const struct net_proto_family packet_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 .family = PF_PACKET,
3813 .create = packet_create,
3814 .owner = THIS_MODULE,
3815};
3816
3817static struct notifier_block packet_netdev_notifier = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003818 .notifier_call = packet_notifier,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819};
3820
3821#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822
3823static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
stephen hemminger808f5112010-02-22 07:57:18 +00003824 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825{
Denis V. Luneve372c4142007-11-19 22:31:54 -08003826 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003827
3828 rcu_read_lock();
3829 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830}
3831
3832static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3833{
Herbert Xu1bf40952007-12-16 14:04:02 -08003834 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003835 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836}
3837
3838static void packet_seq_stop(struct seq_file *seq, void *v)
stephen hemminger808f5112010-02-22 07:57:18 +00003839 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840{
stephen hemminger808f5112010-02-22 07:57:18 +00003841 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842}
3843
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003844static int packet_seq_show(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845{
3846 if (v == SEQ_START_TOKEN)
3847 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3848 else {
Li Zefanb7ceabd2010-02-08 23:19:29 +00003849 struct sock *s = sk_entry(v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850 const struct packet_sock *po = pkt_sk(s);
3851
3852 seq_printf(seq,
Dan Rosenberg71338aa2011-05-23 12:17:35 +00003853 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 s,
3855 atomic_read(&s->sk_refcnt),
3856 s->sk_type,
3857 ntohs(po->num),
3858 po->ifindex,
3859 po->running,
3860 atomic_read(&s->sk_rmem_alloc),
3861 sock_i_uid(s),
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003862 sock_i_ino(s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 }
3864
3865 return 0;
3866}
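
/*
 * The seq_printf() above yields one line per packet socket in
 * /proc/net/packet; a hypothetical entry (field values purely illustrative)
 * would look like:
 *
 *	sk       RefCnt Type Proto Iface R Rmem   User   Inode
 *	ee6b2b00 3      3    0003  2     1 0      0      9588
 */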
3867
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003868static const struct seq_operations packet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869 .start = packet_seq_start,
3870 .next = packet_seq_next,
3871 .stop = packet_seq_stop,
3872 .show = packet_seq_show,
3873};
3874
3875static int packet_seq_open(struct inode *inode, struct file *file)
3876{
Denis V. Luneve372c4142007-11-19 22:31:54 -08003877 return seq_open_net(inode, file, &packet_seq_ops,
3878 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879}
3880
Arjan van de Venda7071d2007-02-12 00:55:36 -08003881static const struct file_operations packet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 .owner = THIS_MODULE,
3883 .open = packet_seq_open,
3884 .read = seq_read,
3885 .llseek = seq_lseek,
Denis V. Luneve372c4142007-11-19 22:31:54 -08003886 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887};
3888
3889#endif
3890
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003891static int __net_init packet_net_init(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003892{
stephen hemminger808f5112010-02-22 07:57:18 +00003893 spin_lock_init(&net->packet.sklist_lock);
Denis V. Lunev2aaef4e2007-12-11 04:19:54 -08003894 INIT_HLIST_HEAD(&net->packet.sklist);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003895
3896 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3897 return -ENOMEM;
3898
3899 return 0;
3900}
3901
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003902static void __net_exit packet_net_exit(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003903{
3904 proc_net_remove(net, "packet");
3905}
3906
3907static struct pernet_operations packet_net_ops = {
3908 .init = packet_net_init,
3909 .exit = packet_net_exit,
3910};
3911
3912
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913static void __exit packet_exit(void)
3914{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 unregister_netdevice_notifier(&packet_netdev_notifier);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003916 unregister_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 sock_unregister(PF_PACKET);
3918 proto_unregister(&packet_proto);
3919}
3920
3921static int __init packet_init(void)
3922{
3923 int rc = proto_register(&packet_proto, 0);
3924
3925 if (rc != 0)
3926 goto out;
3927
3928 sock_register(&packet_family_ops);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003929 register_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930 register_netdevice_notifier(&packet_netdev_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931out:
3932 return rc;
3933}
3934
3935module_init(packet_init);
3936module_exit(packet_exit);
3937MODULE_LICENSE("GPL");
3938MODULE_ALIAS_NETPROTO(PF_PACKET);