/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *	Eric Biederman		:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *	Johann Baudy		:	Added TX RING.
 *	Chetan Loke		:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
  If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */

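/*
 * Userspace illustration (a hedged sketch, not part of this file): given the
 * rules above, a SOCK_RAW packet socket both receives and must supply the
 * link-layer header, while SOCK_DGRAM hides it:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// read() returns frames starting at the Ethernet header;
 *	// with SOCK_DGRAM the same read() would start at the payload.
 */
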
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

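/*
 * Layout note (descriptive, derived from the macros above): a TPACKET_V3
 * block begins with the block descriptor, padded to V3_ALIGNMENT, followed
 * by an optional user-private area, also padded to V3_ALIGNMENT; packets are
 * then appended starting BLK_PLUS_PRIV(tp_sizeof_priv) bytes into the block.
 * For example, with tp_sizeof_priv == 13, packets start at BLK_HDR_LEN + 16.
 */
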
/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

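/*
 * Indexing note (descriptive): pg_vec is an array of equally sized memory
 * blocks; frame 'n' of a V1/V2 ring lives at
 *	pg_vec[n / frames_per_block].buffer + (n % frames_per_block) * frame_size
 * which is exactly the arithmetic packet_lookup_frame() performs below.
 */
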
#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct net_device __rcu	*cached_dev;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

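/*
 * Descriptive note: a packet_fanout groups up to PACKET_FANOUT_MAX sockets
 * that share one (id, prot_hook) registration; packet_rcv_fanout() below
 * picks one member per packet by flow hash (PACKET_FANOUT_HASH), round-robin
 * (PACKET_FANOUT_LB) or receiving CPU (PACKET_FANOUT_CPU).
 */
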
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

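/*
 * Descriptive note: the receive paths stash the originating address and the
 * original length in the skb's control buffer (skb->cb) via PACKET_SKB_CB(),
 * so recvmsg() can fill in msg_name later without any extra allocation.
 */
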
#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))

#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))

#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))

#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout) {
			__fanout_link(sk, po);
		} else {
			dev_add_pack(&po->prot_hook);
			rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
		}

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout) {
		__fanout_unlink(sk, po);
	} else {
		__dev_remove_pack(&po->prot_hook);
		RCU_INIT_POINTER(po->cached_dev, NULL);
	}

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

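/*
 * Descriptive note: the tp_status word of each frame is the ownership handoff
 * between kernel and userspace (TP_STATUS_KERNEL vs TP_STATUS_USER).  The
 * smp_wmb()/smp_rmb() pairing in the two helpers below, together with the
 * dcache flushes for non-coherent architectures, keeps that handoff ordered
 * on both sides of the mmap'ed ring.
 */
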
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		switch (ecmd.speed) {
		case SPEED_10000:
			msec = 1;
			div = 10000/1000;
			break;
		case SPEED_1000:
			msec = 1;
			div = 1000/1000;
			break;
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		case SPEED_100:
		case SPEED_10:
		default:
			return DEFAULT_PRB_RETIRE_TOV;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

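/*
 * Worked example (descriptive, integer arithmetic exactly as coded above):
 * with a 1 MB block on a 1 Gb/s link, mbits = (2^20 * 8) / 2^20 = 8 and
 * div = 1, so tmo = 8 ms and the function returns 9 ms; on a 10 Gb/s link,
 * div = 10 truncates mbits to 0 and the timeout floors at 1 ms.
 */
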
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = (char *)pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 * because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */
	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	getnstimeofday(&ts);
	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
				BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					struct sk_buff *skb,
					int status,
					unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *) ((char *)pbd + pkc->kblk_size);

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

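/*
 * Descriptive note: for TPACKET_V1/V2 the ring consists of fixed-size frames
 * addressed by index, so the lookup helpers below simply consult the current
 * head; for TPACKET_V3 packets are packed back-to-back inside variable-fill
 * blocks, so the lookup descends into __packet_lookup_frame_in_block().
 */
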
static void *packet_current_rx_frame(struct packet_sock *po,
					struct sk_buff *skb,
					int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				struct packet_ring_buffer *rb,
				unsigned int previous,
				int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					struct packet_ring_buffer *rb,
					int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					struct packet_ring_buffer *rb,
					int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

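	/* Scale the 32-bit flow hash into [0, num) without a division:
	 * (hash * num) >> 32 behaves like hash * num / 2^32. */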
	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return f->arr[val % num];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = ACCESS_ONCE(f->num_members);
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

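/*
 * Userspace illustration (a hedged sketch, not part of this file): a member
 * joins a fanout group by packing the 16-bit group id and the mode into one
 * integer, which reaches fanout_add() via the PACKET_FANOUT socket option:
 *
 *	int arg = fanout_group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */
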
static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


1436/*
1437 * Output a raw packet to a device layer. This bypasses all the other
1438 * protocol layers and you must therefore supply it with a complete frame
1439 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001440
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1442 struct msghdr *msg, size_t len)
1443{
1444 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001445 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001446 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001448 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 int err;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001450 int extra_len = 0;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001453 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 */
1455
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001456 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001458 return -EINVAL;
1459 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1460 proto = saddr->spkt_protocol;
1461 } else
1462 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
1464 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001465 * Find the device first to size check it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 */
1467
1468 saddr->spkt_device[13] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001469retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001470 rcu_read_lock();
1471 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 err = -ENODEV;
1473 if (dev == NULL)
1474 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001475
David S. Millerd5e76b02007-01-25 19:30:36 -08001476 err = -ENETDOWN;
1477 if (!(dev->flags & IFF_UP))
1478 goto out_unlock;
1479
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001481 * You may not queue a frame bigger than the MTU. This is the lowest-level
1482 * raw protocol, and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001484
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001485 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1486 if (!netif_supports_nofcs(dev)) {
1487 err = -EPROTONOSUPPORT;
1488 goto out_unlock;
1489 }
1490 extra_len = 4; /* We're doing our own CRC */
1491 }
1492
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001494 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 goto out_unlock;
1496
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001497 if (!skb) {
1498 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001499 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001500 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001502 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001503 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001504 if (skb == NULL)
1505 return -ENOBUFS;
1506 /* FIXME: Save some space for broken drivers that write a hard
1507 * header at transmission time by themselves. PPP is the notable
1508 * one here. This should really be fixed at the driver level.
1509 */
1510 skb_reserve(skb, reserved);
1511 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001512
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001513 /* Try to align data part correctly */
1514 if (hhlen) {
1515 skb->data -= hhlen;
1516 skb->tail -= hhlen;
1517 if (len < hhlen)
1518 skb_reset_network_header(skb);
1519 }
1520 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1521 if (err)
1522 goto out_free;
1523 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 }
1525
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001526 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00001527 /* Earlier code assumed this would be a VLAN pkt,
1528 * double-check this now that we have the actual
1529 * packet in hand.
1530 */
1531 struct ethhdr *ehdr;
1532 skb_reset_mac_header(skb);
1533 ehdr = eth_hdr(skb);
1534 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1535 err = -EMSGSIZE;
1536 goto out_unlock;
1537 }
1538 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001539
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 skb->protocol = proto;
1541 skb->dev = dev;
1542 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001543 skb->mark = sk->sk_mark;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001544 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00001545 if (err < 0)
1546 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001548 if (unlikely(extra_len == 4))
1549 skb->no_fcs = 1;
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001552 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001553 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001556 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001557out_free:
1558 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 return err;
1560}
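/*
 * A minimal user-space sketch of driving the SOCK_PACKET send path
 * above (illustrative only, not part of this file; assumes <string.h>,
 * <sys/socket.h>, <linux/if_ether.h> and an existing "eth0"):
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt;
 *
 *	memset(&spkt, 0, sizeof(spkt));
 *	spkt.spkt_family = AF_PACKET;
 *	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_ALL);
 *	// frame[] must be a complete link-layer frame; anything larger
 *	// than MTU + hard header (+ VLAN_HLEN) is rejected with -EMSGSIZE
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */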
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001562static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001563 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001564 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
1566 struct sk_filter *filter;
1567
Eric Dumazet80f8f102011-01-18 07:46:52 +00001568 rcu_read_lock();
1569 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001570 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001571 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001572 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
David S. Millerdbcb5852007-01-24 15:21:02 -08001574 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
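/*
 * The filter consulted by run_filter() is installed from user space
 * via SO_ATTACH_FILTER. A sketch (classic BPF; this one-instruction
 * program accepts up to 96 bytes of every packet -- illustrative only):
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 96 },	// BPF_RET | BPF_K: accept 96 bytes
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */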
1576
1577/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001578 * This function performs lazy skb cloning in the hope that most packets
1579 * are discarded by BPF.
1580 *
1581 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1582 * and skb->cb are mangled. It works because (and until) packets
1583 * falling here are owned by current CPU. Output packets are cloned
1584 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1585 * sequentially, so that if we return the skb to its original state on exit,
1586 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 */
1588
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001589static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1590 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
1592 struct sock *sk;
1593 struct sockaddr_ll *sll;
1594 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001595 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001597 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599 if (skb->pkt_type == PACKET_LOOPBACK)
1600 goto drop;
1601
1602 sk = pt->af_packet_priv;
1603 po = pkt_sk(sk);
1604
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001605 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001606 goto drop;
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 skb->dev = dev;
1609
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001610 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001612 * exported to higher levels.
1613 *
1614 * Otherwise, the device hides details of its frame
1615 * structure, so that the corresponding packet head is
1616 * never delivered to the user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 */
1618 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001619 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 else if (skb->pkt_type == PACKET_OUTGOING) {
1621 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001622 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 }
1624 }
1625
1626 snaplen = skb->len;
1627
David S. Millerdbcb5852007-01-24 15:21:02 -08001628 res = run_filter(skb, sk, snaplen);
1629 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001630 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001631 if (snaplen > res)
1632 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001634 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 goto drop_n_acct;
1636
1637 if (skb_shared(skb)) {
1638 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1639 if (nskb == NULL)
1640 goto drop_n_acct;
1641
1642 if (skb_head != skb->data) {
1643 skb->data = skb_head;
1644 skb->len = skb_len;
1645 }
1646 kfree_skb(skb);
1647 skb = nskb;
1648 }
1649
Herbert Xuffbc6112007-02-04 23:33:10 -08001650 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1651 sizeof(skb->cb));
1652
1653 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 sll->sll_family = AF_PACKET;
1655 sll->sll_hatype = dev->type;
1656 sll->sll_protocol = skb->protocol;
1657 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001658 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001659 sll->sll_ifindex = orig_dev->ifindex;
1660 else
1661 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001663 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664
Herbert Xuffbc6112007-02-04 23:33:10 -08001665 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001666
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 if (pskb_trim(skb, snaplen))
1668 goto drop_n_acct;
1669
1670 skb_set_owner_r(skb, sk);
1671 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001672 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Phil Oester84531c22005-07-12 11:57:52 -07001674 /* drop conntrack reference */
1675 nf_reset(skb);
1676
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 spin_lock(&sk->sk_receive_queue.lock);
1678 po->stats.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001679 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 __skb_queue_tail(&sk->sk_receive_queue, skb);
1681 spin_unlock(&sk->sk_receive_queue.lock);
1682 sk->sk_data_ready(sk, skb->len);
1683 return 0;
1684
1685drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001686 spin_lock(&sk->sk_receive_queue.lock);
1687 po->stats.tp_drops++;
1688 atomic_inc(&sk->sk_drops);
1689 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691drop_n_restore:
1692 if (skb_head != skb->data && skb_shared(skb)) {
1693 skb->data = skb_head;
1694 skb->len = skb_len;
1695 }
1696drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001697 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 return 0;
1699}
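/*
 * What the path above looks like from user space: each recvfrom() on a
 * bound AF_PACKET/SOCK_RAW socket returns one frame plus the
 * sockaddr_ll that packet_rcv() stored in the skb's control block.
 * A sketch (error handling omitted):
 *
 *	unsigned char buf[65536];
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *	// from.sll_ifindex, from.sll_hatype, from.sll_pkttype and
 *	// from.sll_addr carry the values filled in above
 */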
1700
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001701static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1702 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703{
1704 struct sock *sk;
1705 struct packet_sock *po;
1706 struct sockaddr_ll *sll;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001707 union {
1708 struct tpacket_hdr *h1;
1709 struct tpacket2_hdr *h2;
chetan lokef6fb8f12011-08-19 10:18:16 +00001710 struct tpacket3_hdr *h3;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001711 void *raw;
1712 } h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001713 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001715 unsigned int snaplen, res;
chetan lokef6fb8f12011-08-19 10:18:16 +00001716 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001717 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 struct sk_buff *copy_skb = NULL;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001719 struct timeval tv;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001720 struct timespec ts;
Scott McMillan614f60f2010-06-02 05:53:56 -07001721 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 if (skb->pkt_type == PACKET_LOOPBACK)
1724 goto drop;
1725
1726 sk = pt->af_packet_priv;
1727 po = pkt_sk(sk);
1728
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001729 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001730 goto drop;
1731
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001732 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001734 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 else if (skb->pkt_type == PACKET_OUTGOING) {
1736 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001737 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 }
1739 }
1740
Herbert Xu8dc41942007-02-04 23:31:32 -08001741 if (skb->ip_summed == CHECKSUM_PARTIAL)
1742 status |= TP_STATUS_CSUMNOTREADY;
1743
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 snaplen = skb->len;
1745
David S. Millerdbcb5852007-01-24 15:21:02 -08001746 res = run_filter(skb, sk, snaplen);
1747 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001748 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001749 if (snaplen > res)
1750 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
1752 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy89133362008-07-18 18:05:19 -07001753 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1754 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 } else {
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001756 unsigned maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001757 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy89133362008-07-18 18:05:19 -07001758 (maclen < 16 ? 16 : maclen)) +
1759 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 macoff = netoff - maclen;
1761 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001762 if (po->tp_version <= TPACKET_V2) {
1763 if (macoff + snaplen > po->rx_ring.frame_size) {
1764 if (po->copy_thresh &&
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001765 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
chetan lokef6fb8f12011-08-19 10:18:16 +00001766 if (skb_shared(skb)) {
1767 copy_skb = skb_clone(skb, GFP_ATOMIC);
1768 } else {
1769 copy_skb = skb_get(skb);
1770 skb_head = skb->data;
1771 }
1772 if (copy_skb)
1773 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001775 snaplen = po->rx_ring.frame_size - macoff;
1776 if ((int)snaplen < 0)
1777 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00001781 h.raw = packet_current_rx_frame(po, skb,
1782 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001783 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 goto ring_is_full;
chetan lokef6fb8f12011-08-19 10:18:16 +00001785 if (po->tp_version <= TPACKET_V2) {
1786 packet_increment_rx_head(po, &po->rx_ring);
1787 /*
1788 * LOSING will be reported until you read the stats,
1789 * because it's COR - Clear On Read.
1790 * Anyway, this is moved to V1/V2 only, as V3 doesn't need it
1791 * at the packet level.
1792 */
1793 if (po->stats.tp_drops)
1794 status |= TP_STATUS_LOSING;
1795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 po->stats.tp_packets++;
1797 if (copy_skb) {
1798 status |= TP_STATUS_COPY;
1799 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 spin_unlock(&sk->sk_receive_queue.lock);
1802
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001803 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001805 switch (po->tp_version) {
1806 case TPACKET_V1:
1807 h.h1->tp_len = skb->len;
1808 h.h1->tp_snaplen = snaplen;
1809 h.h1->tp_mac = macoff;
1810 h.h1->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001811 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1812 && shhwtstamps->syststamp.tv64)
1813 tv = ktime_to_timeval(shhwtstamps->syststamp);
1814 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1815 && shhwtstamps->hwtstamp.tv64)
1816 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1817 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001818 tv = ktime_to_timeval(skb->tstamp);
1819 else
1820 do_gettimeofday(&tv);
1821 h.h1->tp_sec = tv.tv_sec;
1822 h.h1->tp_usec = tv.tv_usec;
1823 hdrlen = sizeof(*h.h1);
1824 break;
1825 case TPACKET_V2:
1826 h.h2->tp_len = skb->len;
1827 h.h2->tp_snaplen = snaplen;
1828 h.h2->tp_mac = macoff;
1829 h.h2->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001830 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1831 && shhwtstamps->syststamp.tv64)
1832 ts = ktime_to_timespec(shhwtstamps->syststamp);
1833 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1834 && shhwtstamps->hwtstamp.tv64)
1835 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1836 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001837 ts = ktime_to_timespec(skb->tstamp);
1838 else
1839 getnstimeofday(&ts);
1840 h.h2->tp_sec = ts.tv_sec;
1841 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001842 if (vlan_tx_tag_present(skb)) {
1843 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1844 status |= TP_STATUS_VLAN_VALID;
1845 } else {
1846 h.h2->tp_vlan_tci = 0;
1847 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001848 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001849 hdrlen = sizeof(*h.h2);
1850 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00001851 case TPACKET_V3:
1852 /* tp_nxt_offset,vlan are already populated above.
1853 * So DONT clear those fields here
1854 */
1855 h.h3->tp_status |= status;
1856 h.h3->tp_len = skb->len;
1857 h.h3->tp_snaplen = snaplen;
1858 h.h3->tp_mac = macoff;
1859 h.h3->tp_net = netoff;
1860 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1861 && shhwtstamps->syststamp.tv64)
1862 ts = ktime_to_timespec(shhwtstamps->syststamp);
1863 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1864 && shhwtstamps->hwtstamp.tv64)
1865 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1866 else if (skb->tstamp.tv64)
1867 ts = ktime_to_timespec(skb->tstamp);
1868 else
1869 getnstimeofday(&ts);
1870 h.h3->tp_sec = ts.tv_sec;
1871 h.h3->tp_nsec = ts.tv_nsec;
1872 hdrlen = sizeof(*h.h3);
1873 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001874 default:
1875 BUG();
1876 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001878 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001879 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 sll->sll_family = AF_PACKET;
1881 sll->sll_hatype = dev->type;
1882 sll->sll_protocol = skb->protocol;
1883 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001884 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001885 sll->sll_ifindex = orig_dev->ifindex;
1886 else
1887 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
Ralf Baechlee16aa202006-12-07 00:11:33 -08001889 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001890#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001892 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893
chetan lokef6fb8f12011-08-19 10:18:16 +00001894 if (po->tp_version <= TPACKET_V2) {
1895 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1896 + macoff + snaplen);
1897 for (start = h.raw; start < end; start += PAGE_SIZE)
1898 flush_dcache_page(pgv_to_page(start));
1899 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001900 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001902#endif
chetan lokef6fb8f12011-08-19 10:18:16 +00001903 if (po->tp_version <= TPACKET_V2)
1904 __packet_set_status(po, h.raw, status);
1905 else
1906 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
1908 sk->sk_data_ready(sk, 0);
1909
1910drop_n_restore:
1911 if (skb_head != skb->data && skb_shared(skb)) {
1912 skb->data = skb_head;
1913 skb->len = skb_len;
1914 }
1915drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001916 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 return 0;
1918
1919ring_is_full:
1920 po->stats.tp_drops++;
1921 spin_unlock(&sk->sk_receive_queue.lock);
1922
1923 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001924 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 goto drop_n_restore;
1926}
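/*
 * Consuming the ring that tpacket_rcv() fills, from user space
 * (sketch, TPACKET_V2; assumes PACKET_RX_RING and mmap() have already
 * set up `ring` with tp_frame_nr frames of tp_frame_size bytes each;
 * process() is a placeholder for the application's handler):
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + i * tp_frame_size);
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// woken by sk_data_ready()
 *	process((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *	i = (i + 1) % tp_frame_nr;
 */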
1927
Johann Baudy69e3c752009-05-18 22:11:22 -07001928static void tpacket_destruct_skb(struct sk_buff *skb)
1929{
1930 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001931 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001932
Johann Baudy69e3c752009-05-18 22:11:22 -07001933 if (likely(po->tx_ring.pg_vec)) {
1934 ph = skb_shinfo(skb)->destructor_arg;
Johann Baudy69e3c752009-05-18 22:11:22 -07001935 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1936 atomic_dec(&po->tx_ring.pending);
1937 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1938 }
1939
1940 sock_wfree(skb);
1941}
1942
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001943static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1944 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001945 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001946{
1947 union {
1948 struct tpacket_hdr *h1;
1949 struct tpacket2_hdr *h2;
1950 void *raw;
1951 } ph;
1952 int to_write, offset, len, tp_len, nr_frags, len_max;
1953 struct socket *sock = po->sk.sk_socket;
1954 struct page *page;
1955 void *data;
1956 int err;
1957
1958 ph.raw = frame;
1959
1960 skb->protocol = proto;
1961 skb->dev = dev;
1962 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001963 skb->mark = po->sk.sk_mark;
Johann Baudy69e3c752009-05-18 22:11:22 -07001964 skb_shinfo(skb)->destructor_arg = ph.raw;
1965
1966 switch (po->tp_version) {
1967 case TPACKET_V2:
1968 tp_len = ph.h2->tp_len;
1969 break;
1970 default:
1971 tp_len = ph.h1->tp_len;
1972 break;
1973 }
1974 if (unlikely(tp_len > size_max)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001975 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
Johann Baudy69e3c752009-05-18 22:11:22 -07001976 return -EMSGSIZE;
1977 }
1978
Herbert Xuae641942011-11-18 02:20:04 +00001979 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07001980 skb_reset_network_header(skb);
1981
1982 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1983 to_write = tp_len;
1984
1985 if (sock->type == SOCK_DGRAM) {
1986 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1987 NULL, tp_len);
1988 if (unlikely(err < 0))
1989 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001990 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07001991 /* net device doesn't like empty head */
1992 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001993 pr_err("packet size is too short (%d < %d)\n",
1994 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07001995 return -EINVAL;
1996 }
1997
1998 skb_push(skb, dev->hard_header_len);
1999 err = skb_store_bits(skb, 0, data,
2000 dev->hard_header_len);
2001 if (unlikely(err))
2002 return err;
2003
2004 data += dev->hard_header_len;
2005 to_write -= dev->hard_header_len;
2006 }
2007
2008 err = -EFAULT;
Johann Baudy69e3c752009-05-18 22:11:22 -07002009 offset = offset_in_page(data);
2010 len_max = PAGE_SIZE - offset;
2011 len = ((to_write > len_max) ? len_max : to_write);
2012
2013 skb->data_len = to_write;
2014 skb->len += to_write;
2015 skb->truesize += to_write;
2016 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2017
2018 while (likely(to_write)) {
2019 nr_frags = skb_shinfo(skb)->nr_frags;
2020
2021 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002022 pr_err("Packet exceeds the number of skb frags (%lu)\n",
2023 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07002024 return -EFAULT;
2025 }
2026
Changli Gao0af55bb2010-12-01 02:52:20 +00002027 page = pgv_to_page(data);
2028 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07002029 flush_dcache_page(page);
2030 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00002031 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002032 to_write -= len;
2033 offset = 0;
2034 len_max = PAGE_SIZE;
2035 len = ((to_write > len_max) ? len_max : to_write);
2036 }
2037
2038 return tp_len;
2039}
2040
Daniel Borkmann63485172013-11-21 16:50:58 +01002041static struct net_device *packet_cached_dev_get(struct packet_sock *po)
2042{
2043 struct net_device *dev;
2044
2045 rcu_read_lock();
2046 dev = rcu_dereference(po->cached_dev);
2047 if (dev)
2048 dev_hold(dev);
2049 rcu_read_unlock();
2050
2051 return dev;
2052}
2053
Johann Baudy69e3c752009-05-18 22:11:22 -07002054static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2055{
Johann Baudy69e3c752009-05-18 22:11:22 -07002056 struct sk_buff *skb;
2057 struct net_device *dev;
2058 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002059 int err, reserve = 0;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002060 void *ph;
2061 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07002062 int tp_len, size_max;
2063 unsigned char *addr;
2064 int len_sum = 0;
2065 int status = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002066 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07002067
Johann Baudy69e3c752009-05-18 22:11:22 -07002068 mutex_lock(&po->pg_vec_lock);
2069
2070 err = -EBUSY;
2071 if (saddr == NULL) {
Daniel Borkmann63485172013-11-21 16:50:58 +01002072 dev = packet_cached_dev_get(po);
Johann Baudy69e3c752009-05-18 22:11:22 -07002073 proto = po->num;
2074 addr = NULL;
2075 } else {
2076 err = -EINVAL;
2077 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2078 goto out;
2079 if (msg->msg_namelen < (saddr->sll_halen
2080 + offsetof(struct sockaddr_ll,
2081 sll_addr)))
2082 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002083 proto = saddr->sll_protocol;
2084 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002085 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
Johann Baudy69e3c752009-05-18 22:11:22 -07002086 }
2087
Johann Baudy69e3c752009-05-18 22:11:22 -07002088 err = -ENXIO;
2089 if (unlikely(dev == NULL))
2090 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002091 err = -ENETDOWN;
2092 if (unlikely(!(dev->flags & IFF_UP)))
2093 goto out_put;
2094
Daniel Borkmann63485172013-11-21 16:50:58 +01002095 reserve = dev->hard_header_len;
2096
Johann Baudy69e3c752009-05-18 22:11:22 -07002097 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002098 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002099
2100 if (size_max > dev->mtu + reserve)
2101 size_max = dev->mtu + reserve;
2102
2103 do {
2104 ph = packet_current_frame(po, &po->tx_ring,
2105 TP_STATUS_SEND_REQUEST);
2106
2107 if (unlikely(ph == NULL)) {
2108 schedule();
2109 continue;
2110 }
2111
2112 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002113 hlen = LL_RESERVED_SPACE(dev);
2114 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002115 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002116 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002117 0, &err);
2118
2119 if (unlikely(skb == NULL))
2120 goto out_status;
2121
2122 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002123 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002124
2125 if (unlikely(tp_len < 0)) {
2126 if (po->tp_loss) {
2127 __packet_set_status(po, ph,
2128 TP_STATUS_AVAILABLE);
2129 packet_increment_head(&po->tx_ring);
2130 kfree_skb(skb);
2131 continue;
2132 } else {
2133 status = TP_STATUS_WRONG_FORMAT;
2134 err = tp_len;
2135 goto out_status;
2136 }
2137 }
2138
2139 skb->destructor = tpacket_destruct_skb;
2140 __packet_set_status(po, ph, TP_STATUS_SENDING);
2141 atomic_inc(&po->tx_ring.pending);
2142
2143 status = TP_STATUS_SEND_REQUEST;
2144 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002145 if (unlikely(err > 0)) {
2146 err = net_xmit_errno(err);
2147 if (err && __packet_get_status(po, ph) ==
2148 TP_STATUS_AVAILABLE) {
2149 /* skb was destructed already */
2150 skb = NULL;
2151 goto out_status;
2152 }
2153 /*
2154 * skb was dropped but not destructed yet;
2155 * let's treat it like congestion or err < 0
2156 */
2157 err = 0;
2158 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002159 packet_increment_head(&po->tx_ring);
2160 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002161 } while (likely((ph != NULL) ||
2162 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2163 (atomic_read(&po->tx_ring.pending))))
2164 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002165
2166 err = len_sum;
2167 goto out_put;
2168
Johann Baudy69e3c752009-05-18 22:11:22 -07002169out_status:
2170 __packet_set_status(po, ph, status);
2171 kfree_skb(skb);
2172out_put:
Daniel Borkmann63485172013-11-21 16:50:58 +01002173 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002174out:
2175 mutex_unlock(&po->pg_vec_lock);
2176 return err;
2177}
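/*
 * The matching user-space TX side (sketch, TPACKET_V2): fill a frame
 * in the mmap()ed TX ring, mark it, then kick tpacket_snd() with an
 * empty send(). Offsets follow tpacket_fill_skb() above:
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + i * tp_frame_size);
 *	void *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);	// saddr == NULL: use the cached device
 */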
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002179static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2180 size_t reserve, size_t len,
2181 size_t linear, int noblock,
2182 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002183{
2184 struct sk_buff *skb;
2185
2186 /* Under a page? Don't bother with paged skb. */
2187 if (prepad + len < PAGE_SIZE || !linear)
2188 linear = len;
2189
2190 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2191 err);
2192 if (!skb)
2193 return NULL;
2194
2195 skb_reserve(skb, reserve);
2196 skb_put(skb, linear);
2197 skb->data_len = len - linear;
2198 skb->len += len - linear;
2199
2200 return skb;
2201}
2202
Johann Baudy69e3c752009-05-18 22:11:22 -07002203static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 struct msghdr *msg, size_t len)
2205{
2206 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002207 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 struct sk_buff *skb;
2209 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002210 __be16 proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002212 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002213 struct virtio_net_hdr vnet_hdr = { 0 };
2214 int offset = 0;
2215 int vnet_hdr_len;
2216 struct packet_sock *po = pkt_sk(sk);
2217 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002218 int hlen, tlen;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002219 int extra_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
2221 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002222 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 if (saddr == NULL) {
Daniel Borkmann63485172013-11-21 16:50:58 +01002226 dev = packet_cached_dev_get(po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 proto = po->num;
2228 addr = NULL;
2229 } else {
2230 err = -EINVAL;
2231 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2232 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002233 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2234 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 proto = saddr->sll_protocol;
2236 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002237 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 }
2239
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 err = -ENXIO;
Daniel Borkmann63485172013-11-21 16:50:58 +01002241 if (unlikely(dev == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 goto out_unlock;
Daniel Borkmann63485172013-11-21 16:50:58 +01002243 err = -ENETDOWN;
2244 if (unlikely(!(dev->flags & IFF_UP)))
2245 goto out_unlock;
2246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 if (sock->type == SOCK_RAW)
2248 reserve = dev->hard_header_len;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002249 if (po->has_vnet_hdr) {
2250 vnet_hdr_len = sizeof(vnet_hdr);
2251
2252 err = -EINVAL;
2253 if (len < vnet_hdr_len)
2254 goto out_unlock;
2255
2256 len -= vnet_hdr_len;
2257
2258 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2259 vnet_hdr_len);
2260 if (err < 0)
2261 goto out_unlock;
2262
2263 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2264 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2265 vnet_hdr.hdr_len))
2266 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2267 vnet_hdr.csum_offset + 2;
2268
2269 err = -EINVAL;
2270 if (vnet_hdr.hdr_len > len)
2271 goto out_unlock;
2272
2273 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2274 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2275 case VIRTIO_NET_HDR_GSO_TCPV4:
2276 gso_type = SKB_GSO_TCPV4;
2277 break;
2278 case VIRTIO_NET_HDR_GSO_TCPV6:
2279 gso_type = SKB_GSO_TCPV6;
2280 break;
2281 case VIRTIO_NET_HDR_GSO_UDP:
2282 gso_type = SKB_GSO_UDP;
2283 break;
2284 default:
2285 goto out_unlock;
2286 }
2287
2288 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2289 gso_type |= SKB_GSO_TCP_ECN;
2290
2291 if (vnet_hdr.gso_size == 0)
2292 goto out_unlock;
2293
2294 }
2295 }
2296
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002297 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2298 if (!netif_supports_nofcs(dev)) {
2299 err = -EPROTONOSUPPORT;
2300 goto out_unlock;
2301 }
2302 extra_len = 4; /* We're doing our own CRC */
2303 }
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002306 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 goto out_unlock;
2308
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002309 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002310 hlen = LL_RESERVED_SPACE(dev);
2311 tlen = dev->needed_tailroom;
2312 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002313 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002314 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 goto out_unlock;
2316
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002317 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002319 err = -EINVAL;
2320 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002321 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002322 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
2324 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002325 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 if (err)
2327 goto out_free;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002328 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00002329 if (err < 0)
2330 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002332 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00002333 /* Earlier code assumed this would be a VLAN pkt,
2334 * double-check this now that we have the actual
2335 * packet in hand.
2336 */
2337 struct ethhdr *ehdr;
2338 skb_reset_mac_header(skb);
2339 ehdr = eth_hdr(skb);
2340 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2341 err = -EMSGSIZE;
2342 goto out_free;
2343 }
2344 }
2345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 skb->protocol = proto;
2347 skb->dev = dev;
2348 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002349 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002351 if (po->has_vnet_hdr) {
2352 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2353 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2354 vnet_hdr.csum_offset)) {
2355 err = -EINVAL;
2356 goto out_free;
2357 }
2358 }
2359
2360 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2361 skb_shinfo(skb)->gso_type = gso_type;
2362
2363 /* Header must be checked, and gso_segs computed. */
2364 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2365 skb_shinfo(skb)->gso_segs = 0;
2366
2367 len += vnet_hdr_len;
2368 }
2369
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002370 if (unlikely(extra_len == 4))
2371 skb->no_fcs = 1;
2372
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 /*
2374 * Now send it
2375 */
2376
2377 err = dev_queue_xmit(skb);
2378 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2379 goto out_unlock;
2380
Daniel Borkmann63485172013-11-21 16:50:58 +01002381 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002383 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384
2385out_free:
2386 kfree_skb(skb);
2387out_unlock:
Daniel Borkmann63485172013-11-21 16:50:58 +01002388 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 dev_put(dev);
2390out:
2391 return err;
2392}
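/*
 * Driving packet_snd() from user space (sketch): the sockaddr_ll
 * passed to sendto() selects device and protocol, and for SOCK_DGRAM
 * also supplies the destination MAC that dev_hard_header() needs
 * (ifindex and dest_mac are assumed to be known):
 *
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family = AF_PACKET;
 *	sll.sll_ifindex = ifindex;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_halen = ETH_ALEN;
 *	memcpy(sll.sll_addr, dest_mac, ETH_ALEN);
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */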
2393
Johann Baudy69e3c752009-05-18 22:11:22 -07002394static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2395 struct msghdr *msg, size_t len)
2396{
Johann Baudy69e3c752009-05-18 22:11:22 -07002397 struct sock *sk = sock->sk;
2398 struct packet_sock *po = pkt_sk(sk);
2399 if (po->tx_ring.pg_vec)
2400 return tpacket_snd(po, msg);
2401 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002402 return packet_snd(sock, msg, len);
2403}
2404
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405/*
2406 * Close a PACKET socket. This is fairly simple. We immediately go
2407 * to 'closed' state and remove our protocol entry in the device list.
2408 */
2409
2410static int packet_release(struct socket *sock)
2411{
2412 struct sock *sk = sock->sk;
2413 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002414 struct net *net;
chetan lokef6fb8f12011-08-19 10:18:16 +00002415 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
2417 if (!sk)
2418 return 0;
2419
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002420 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 po = pkt_sk(sk);
2422
stephen hemminger808f5112010-02-22 07:57:18 +00002423 spin_lock_bh(&net->packet.sklist_lock);
2424 sk_del_node_init_rcu(sk);
Eric Dumazet920de802008-11-24 00:09:29 -08002425 sock_prot_inuse_add(net, sk->sk_prot, -1);
stephen hemminger808f5112010-02-22 07:57:18 +00002426 spin_unlock_bh(&net->packet.sklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
stephen hemminger808f5112010-02-22 07:57:18 +00002428 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002429 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002430 if (po->prot_hook.dev) {
2431 dev_put(po->prot_hook.dev);
2432 po->prot_hook.dev = NULL;
2433 }
stephen hemminger808f5112010-02-22 07:57:18 +00002434 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437
Phil Sutterf60f8542013-02-01 07:21:41 +00002438 if (po->rx_ring.pg_vec) {
2439 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002440 packet_set_ring(sk, &req_u, 1, 0);
Phil Sutterf60f8542013-02-01 07:21:41 +00002441 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002442
Phil Sutterf60f8542013-02-01 07:21:41 +00002443 if (po->tx_ring.pg_vec) {
2444 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002445 packet_set_ring(sk, &req_u, 1, 1);
Phil Sutterf60f8542013-02-01 07:21:41 +00002446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
David S. Millerdc99f602011-07-05 01:45:05 -07002448 fanout_release(sk);
2449
stephen hemminger808f5112010-02-22 07:57:18 +00002450 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 /*
2452 * Now the socket is dead. No more input will appear.
2453 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 sock_orphan(sk);
2455 sock->sk = NULL;
2456
2457 /* Purge queues */
2458
2459 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002460 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461
2462 sock_put(sk);
2463 return 0;
2464}
2465
2466/*
2467 * Attach a packet hook.
2468 */
2469
Al Viro0e11c912006-11-08 00:26:29 -08002470static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471{
2472 struct packet_sock *po = pkt_sk(sk);
David S. Millerdc99f602011-07-05 01:45:05 -07002473
Wei Yongjunaef950b2011-12-27 22:32:41 -05002474 if (po->fanout) {
2475 if (dev)
2476 dev_put(dev);
2477
David S. Millerdc99f602011-07-05 01:45:05 -07002478 return -EINVAL;
Wei Yongjunaef950b2011-12-27 22:32:41 -05002479 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480
2481 lock_sock(sk);
2482
2483 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002484 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485 po->num = protocol;
2486 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002487 if (po->prot_hook.dev)
2488 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 po->prot_hook.dev = dev;
2490
2491 po->ifindex = dev ? dev->ifindex : 0;
2492
2493 if (protocol == 0)
2494 goto out_unlock;
2495
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002496 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002497 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002498 } else {
2499 sk->sk_err = ENETDOWN;
2500 if (!sock_flag(sk, SOCK_DEAD))
2501 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 }
2503
2504out_unlock:
2505 spin_unlock(&po->bind_lock);
2506 release_sock(sk);
2507 return 0;
2508}
2509
2510/*
2511 * Bind a packet socket to a device
2512 */
2513
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002514static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2515 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002517 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 char name[15];
2519 struct net_device *dev;
2520 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002521
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 /*
2523 * Check legality
2524 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002525
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002526 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002528 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002530 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002531 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 return err;
2534}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535
2536static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2537{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002538 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2539 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 struct net_device *dev = NULL;
2541 int err;
2542
2543
2544 /*
2545 * Check legality
2546 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002547
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 if (addr_len < sizeof(struct sockaddr_ll))
2549 return -EINVAL;
2550 if (sll->sll_family != AF_PACKET)
2551 return -EINVAL;
2552
2553 if (sll->sll_ifindex) {
2554 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002555 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 if (dev == NULL)
2557 goto out;
2558 }
2559 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
2561out:
2562 return err;
2563}
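/*
 * Binding from user space (sketch): only sll_family, sll_protocol and
 * sll_ifindex matter here; packet_do_bind() ignores the rest
 * (if_nametoindex() is from <net/if.h>):
 *
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex = if_nametoindex("eth0");	// 0 == any device
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */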
2564
2565static struct proto packet_proto = {
2566 .name = "PACKET",
2567 .owner = THIS_MODULE,
2568 .obj_size = sizeof(struct packet_sock),
2569};
2570
2571/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002572 * Create a packet socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 */
2574
Eric Paris3f378b62009-11-05 22:18:14 -08002575static int packet_create(struct net *net, struct socket *sock, int protocol,
2576 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577{
2578 struct sock *sk;
2579 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002580 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 int err;
2582
2583 if (!capable(CAP_NET_RAW))
2584 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002585 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2586 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 return -ESOCKTNOSUPPORT;
2588
2589 sock->state = SS_UNCONNECTED;
2590
2591 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002592 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 if (sk == NULL)
2594 goto out;
2595
2596 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 if (sock->type == SOCK_PACKET)
2598 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 sock_init_data(sock, sk);
2601
2602 po = pkt_sk(sk);
2603 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002604 po->num = proto;
Daniel Borkmann63485172013-11-21 16:50:58 +01002605 RCU_INIT_POINTER(po->cached_dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606
2607 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002608 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
2610 /*
2611 * Attach a protocol block
2612 */
2613
2614 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002615 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002617
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 if (sock->type == SOCK_PACKET)
2619 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 po->prot_hook.af_packet_priv = sk;
2622
Al Viro0e11c912006-11-08 00:26:29 -08002623 if (proto) {
2624 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002625 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 }
2627
stephen hemminger808f5112010-02-22 07:57:18 +00002628 spin_lock_bh(&net->packet.sklist_lock);
2629 sk_add_node_rcu(sk, &net->packet.sklist);
Eric Dumazet36804532008-11-19 14:25:35 -08002630 sock_prot_inuse_add(net, &packet_proto, 1);
stephen hemminger808f5112010-02-22 07:57:18 +00002631 spin_unlock_bh(&net->packet.sklist_lock);
2632
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002633 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634out:
2635 return err;
2636}
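/*
 * Entry points from user space (sketch): all three supported socket
 * types funnel into packet_create(); CAP_NET_RAW is required.
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	int spkt = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	// protocol 0 creates the socket without registering a hook;
 *	// it starts receiving only once bind() supplies a protocol
 */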
2637
Richard Cochraned85b562010-04-07 22:41:28 +00002638static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2639{
2640 struct sock_exterr_skb *serr;
2641 struct sk_buff *skb, *skb2;
2642 int copied, err;
2643
2644 err = -EAGAIN;
2645 skb = skb_dequeue(&sk->sk_error_queue);
2646 if (skb == NULL)
2647 goto out;
2648
2649 copied = skb->len;
2650 if (copied > len) {
2651 msg->msg_flags |= MSG_TRUNC;
2652 copied = len;
2653 }
2654 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2655 if (err)
2656 goto out_free_skb;
2657
2658 sock_recv_timestamp(msg, sk, skb);
2659
2660 serr = SKB_EXT_ERR(skb);
2661 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2662 sizeof(serr->ee), &serr->ee);
2663
2664 msg->msg_flags |= MSG_ERRQUEUE;
2665 err = copied;
2666
2667 /* Reset and regenerate socket error */
2668 spin_lock_bh(&sk->sk_error_queue.lock);
2669 sk->sk_err = 0;
2670 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2671 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2672 spin_unlock_bh(&sk->sk_error_queue.lock);
2673 sk->sk_error_report(sk);
2674 } else
2675 spin_unlock_bh(&sk->sk_error_queue.lock);
2676
2677out_free_skb:
2678 kfree_skb(skb);
2679out:
2680 return err;
2681}
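/*
 * Collecting the queued TX timestamps from user space (sketch): once
 * timestamp generation is enabled via SO_TIMESTAMPING, the
 * PACKET_TX_TIMESTAMP cmsg put above is read with MSG_ERRQUEUE:
 *
 *	char ctrl[512];
 *	struct msghdr msg = { .msg_control = ctrl,
 *			      .msg_controllen = sizeof(ctrl) };
 *	struct cmsghdr *cm;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_PACKET &&
 *		    cm->cmsg_type == PACKET_TX_TIMESTAMP)
 *			;	// CMSG_DATA(cm) is the sock_extended_err
 */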
2682
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683/*
2684 * Pull a packet from our receive queue and hand it to the user.
2685 * If necessary we block.
2686 */
2687
2688static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2689 struct msghdr *msg, size_t len, int flags)
2690{
2691 struct sock *sk = sock->sk;
2692 struct sk_buff *skb;
2693 int copied, err;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002694 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695
2696 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002697 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 goto out;
2699
2700#if 0
2701 /* What error should we return now? EUNATTACH? */
2702 if (pkt_sk(sk)->ifindex < 0)
2703 return -ENODEV;
2704#endif
2705
Richard Cochraned85b562010-04-07 22:41:28 +00002706 if (flags & MSG_ERRQUEUE) {
2707 err = packet_recv_error(sk, msg, len);
2708 goto out;
2709 }
2710
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 * Call the generic datagram receiver. This handles all sorts
2713 * of horrible races and re-entrancy so we can forget about it
2714 * in the protocol layers.
2715 *
2716 * Now it will return ENETDOWN if the device has just gone down,
2717 * but then it will block.
2718 */
2719
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002720 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721
2722 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002723 * An error occurred, so return it. Because skb_recv_datagram()
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 * handles the blocking, we don't need to see or worry about blocking
2725 * retries.
2726 */
2727
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002728 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 goto out;
2730
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002731 if (pkt_sk(sk)->has_vnet_hdr) {
2732 struct virtio_net_hdr vnet_hdr = { 0 };
2733
2734 err = -EINVAL;
2735 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002736 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002737 goto out_free;
2738
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002739 len -= vnet_hdr_len;
2740
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002741 if (skb_is_gso(skb)) {
2742 struct skb_shared_info *sinfo = skb_shinfo(skb);
2743
2744 /* This is a hint as to how much should be linear. */
2745 vnet_hdr.hdr_len = skb_headlen(skb);
2746 vnet_hdr.gso_size = sinfo->gso_size;
2747 if (sinfo->gso_type & SKB_GSO_TCPV4)
2748 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2749 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2750 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2751 else if (sinfo->gso_type & SKB_GSO_UDP)
2752 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2753 else if (sinfo->gso_type & SKB_GSO_FCOE)
2754 goto out_free;
2755 else
2756 BUG();
2757 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2758 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2759 } else
2760 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2761
2762 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2763 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002764 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002765 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002766 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2767 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002768 } /* else everything is zero */
2769
2770 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2771 vnet_hdr_len);
2772 if (err < 0)
2773 goto out_free;
2774 }
2775
Hannes Frederic Sowa18719a42013-11-21 03:14:22 +01002776 /* You lose any data beyond the buffer you gave. If this worries
2777 * a user program, it can ask the device for its MTU
2778 * anyway.
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002779 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002781 if (copied > len) {
2782 copied = len;
2783 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 }
2785
2786 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2787 if (err)
2788 goto out_free;
2789
Neil Horman3b885782009-10-12 13:26:31 -07002790 sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		/* If the address length field is there to be filled
		 * in, we fill it in now.
		 */
		if (sock->type == SOCK_PACKET) {
			msg->msg_namelen = sizeof(struct sockaddr_pkt);
		} else {
			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
			msg->msg_namelen = sll->sll_halen +
				offsetof(struct sockaddr_ll, sll_addr);
		}
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);
	}

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 * Free or return the buffer as appropriate. Again this
	 * hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
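
/*
 * Userspace sketch (illustration only, not part of this file): one way a
 * receiver might consume the PACKET_AUXDATA control message emitted by
 * packet_recvmsg() above, per packet(7). "fd" and the buffer sizes are
 * assumptions.
 *
 *	int one = 1;
 *	char frame[2048], ctl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = ctl,
 *			      .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	if (recvmsg(fd, &msg, 0) >= 0)
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_PACKET &&
 *			    cmsg->cmsg_type == PACKET_AUXDATA) {
 *				struct tpacket_auxdata *aux =
 *					(struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *				... use aux->tp_snaplen, aux->tp_vlan_tci ...
 *			}
 */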

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
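
/*
 * Illustration only: after bind(), userspace can read back the link-layer
 * address that packet_getname() fills in above. "fd" is an assumption.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *
 *	if (getsockname(fd, (struct sockaddr *)&sll, &alen) == 0)
 *		... sll.sll_ifindex, sll.sll_hatype, and the first
 *		    sll.sll_halen bytes of sll.sll_addr describe the
 *		    bound device ...
 */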

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
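
/*
 * Illustration only: the userspace view of the membership machinery above,
 * as documented in packet(7). Here the bound device is put into (and later
 * taken out of) promiscuous mode; "fd" and "ifindex" are assumptions.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *	...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */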

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len)
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req_u.req, optval, len))
			return -EFAULT;
		return packet_set_ring(sk, &req_u, 0,
			optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
		case TPACKET_V3:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	default:
		return -ENOPROTOOPT;
	}
}
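
/*
 * Illustration only: PACKET_FANOUT packs a 16-bit group id in the low half
 * of the integer and the fanout mode in the high half, which is why
 * fanout_add() above is called with (val & 0xffff, val >> 16). "fd" and
 * the group id 42 are assumptions.
 *
 *	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */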

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;
	union tpacket_stats_u st_u;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (po->tp_version == TPACKET_V3) {
			len = sizeof(struct tpacket_stats_v3);
		} else {
			if (len > sizeof(struct tpacket_stats))
				len = sizeof(struct tpacket_stats);
		}
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (po->tp_version == TPACKET_V3) {
			memcpy(&st_u.stats3, &po->stats,
			       sizeof(struct tpacket_stats));
			st_u.stats3.tp_freeze_q_cnt =
				po->stats_u.stats3.tp_freeze_q_cnt;
			st_u.stats3.tp_packets += po->stats.tp_drops;
			data = &st_u.stats3;
		} else {
			st = po->stats;
			st.tp_packets += st.tp_drops;
			data = &st;
		}
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->has_vnet_hdr;

		data = &val;
		break;
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_tstamp;
		data = &val;
		break;
	case PACKET_FANOUT:
		if (len > sizeof(int))
			len = sizeof(int);
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16)) :
		       0);
		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
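
/*
 * Illustration only: reading the V1/V2 statistics handled above. Note that
 * the handler zeroes the counters on every read, so each query returns the
 * delta since the previous one, and tp_packets includes tp_drops. "fd" is
 * an assumption.
 *
 *	struct tpacket_stats stats;
 *	socklen_t len = sizeof(stats);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS,
 *		       &stats, &len) == 0)
 *		printf("seen %u, dropped %u\n",
 *		       stats.tp_packets, stats.tp_drops);
 */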


static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
			TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
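
/*
 * Illustration only: the userspace half of the ring-aware poll logic
 * above. A receiver sleeps in poll() and then walks frames whose
 * tp_status has TP_STATUS_USER set, handing each back to the kernel with
 * TP_STATUS_KERNEL. "ring", "frame_nr", and "frame_size" are assumptions
 * matching a TPACKET_V1 ring already set up via PACKET_RX_RING + mmap().
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *	unsigned int i = 0;
 *
 *	for (;;) {
 *		struct tpacket_hdr *hdr =
 *			(struct tpacket_hdr *)(ring + i * frame_size);
 *
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			poll(&pfd, 1, -1);
 *			continue;
 *		}
 *		... consume the frame at (char *)hdr + hdr->tp_mac ...
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		i = (i + 1) % frame_nr;
 *	}
 */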


/* Dirty? Well, I still have not found a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open = packet_mm_open,
	.close = packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);

	if (buffer)
		return buffer;

	/*
	 * __get_free_pages failed, fall back to vmalloc
	 */
	buffer = vzalloc((1 << order) * PAGE_SIZE);

	if (buffer)
		return buffer;

	/*
	 * vmalloc failed, let's dig into swap here
	 */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/*
	 * complete and utter failure
	 */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Alias added to minimize code churn */
	struct tpacket_req *req = &req_u->req;

	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
		WARN(1, "Tx-ring is not supported.\n");
		goto out;
	}

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* The transmit path is not supported. We checked
			 * that above, but stay paranoid.
			 */
			if (!tx_ring)
				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
			break;
		default:
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (closing && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
	}
	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
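
/*
 * Illustration only: a userspace tpacket_req that satisfies the sanity
 * checks above for TPACKET_V1/V2 — block size page-aligned, frame size a
 * multiple of TPACKET_ALIGNMENT, and tp_frame_nr equal to frames-per-block
 * times tp_block_nr ((4096 / 2048) * 64 = 128 here). All sizes are
 * assumptions. Note that PACKET_VERSION must be chosen first, since
 * packet_setsockopt() rejects it with -EBUSY once a ring exists.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */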

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
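
/*
 * Illustration only: as the size check above implies, a single mmap() with
 * zero offset must cover both configured rings (rx pages first, then tx).
 * A matching userspace call, with "req_rx"/"req_tx" as assumptions, might
 * look like:
 *
 *	size_t len = req_rx.tp_block_size * req_rx.tp_block_nr +
 *		     req_tx.tp_block_size * req_tx.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */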

static const struct proto_ops packet_ops_spkt = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind_spkt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname_spkt,
	.poll = datagram_poll,
	.ioctl = packet_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = packet_sendmsg_spkt,
	.recvmsg = packet_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname,
	.poll = packet_poll,
	.ioctl = packet_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = packet_setsockopt,
	.getsockopt = packet_getsockopt,
	.sendmsg = packet_sendmsg,
	.recvmsg = packet_recvmsg,
	.mmap = packet_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family = PF_PACKET,
	.create = packet_create,
	.owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start = packet_seq_start,
	.next = packet_seq_next,
	.stop = packet_seq_stop,
	.show = packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner = THIS_MODULE,
	.open = packet_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);