/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@redhat.com>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 *
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock             sk;
        u32                     pid;
        unsigned int            groups;
        u32                     dst_pid;
        unsigned int            dst_groups;
        unsigned long           state;
        wait_queue_head_t       wait;
        struct netlink_callback *cb;
        spinlock_t              cb_lock;
        void                    (*data_ready)(struct sock *sk, int bytes);
};

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
        return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
        struct hlist_head *table;
        unsigned long rehash_time;

        unsigned int mask;
        unsigned int shift;

        unsigned int entries;
        unsigned int max_shift;

        u32 rnd;
};

struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
        unsigned int nl_nonroot;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Freeing alive netlink socket %p\n", sk);
                return;
        }
        BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(!nlk_sk(sk)->cb);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP: when several writers sleep and the reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember that it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_bh(&nl_table_lock);
                        schedule();
                        write_lock_bh(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

static __inline__ void netlink_table_ungrab(void)
{
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}
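
/*
 * A usage sketch for the two schemes above (illustrative, not code from
 * this file): writers that reorganize the table bracket their work with
 * netlink_table_grab()/netlink_table_ungrab(), short read-side sections
 * take nl_table_lock directly, and readers that may sleep use the
 * counted form so the grabber waits for them to drain:
 *
 *      netlink_lock_table();
 *      ...walk mc_list, possibly sleeping in skb_clone()...
 *      netlink_unlock_table();
 */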

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;

        read_lock(&nl_table_lock);
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(sk, node, head) {
                if (nlk_sk(sk)->pid == pid) {
                        sock_hold(sk);
                        goto found;
                }
        }
        sk = NULL;
found:
        read_unlock(&nl_table_lock);
        return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_ATOMIC);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct hlist_head *otable, *table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_pid_hash_alloc(size);
        if (!table)
                return 0;

        memset(table, 0, size);
        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct hlist_node *node, *tmp;

                sk_for_each_safe(sk, node, tmp, &otable[i])
                        __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
        }

        nl_pid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_pid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}
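
/*
 * A worked example of the sizing policy above (illustrative numbers):
 * with hash->shift == 3 there are 8 buckets, so avg = entries >> 3 first
 * exceeds 1 when the 16th pid is inserted, and the table doubles to 16
 * buckets. When the table can no longer grow but one chain is longer
 * than average, the in-place rehash picks a fresh hash->rnd to
 * redistribute the entries, throttled by rehash_time to once per
 * 10 minutes.
 */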

static struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
        struct hlist_node *node;
        int len;

        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        len = 0;
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid)
                        break;
                len++;
        }
        if (node)
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->pid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;

        if (len && nl_pid_hash_dilute(hash, len))
                head = nl_pid_hashfn(hash, pid);
        hash->entries++;
        nlk_sk(sk)->pid = pid;
        sk_add_node(sk, head);
        err = 0;

err:
        netlink_table_ungrab();
        return err;
}

static void netlink_remove(struct sock *sk)
{
        netlink_table_grab();
        nl_table[sk->sk_protocol].hash.entries--;
        sk_del_node_init(sk);
        if (nlk_sk(sk)->groups)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int netlink_create(struct socket *sock, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        sock->ops = &netlink_ops;

        sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);

        spin_lock_init(&nlk->cb_lock);
        init_waitqueue_head(&nlk->wait);
        sk->sk_destruct = netlink_sock_destruct;

        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        nlk = nlk_sk(sk);

        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                nlk->cb->done(nlk->cb);
                netlink_destroy_callback(nlk->cb);
                nlk->cb = NULL;
        }
        spin_unlock(&nlk->cb_lock);

        /* OK. Socket is unlinked, and, therefore,
           no new packets will arrive */

        sock_orphan(sk);
        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->pid && !nlk->groups) {
                struct netlink_notify n = {
                        .protocol = sk->sk_protocol,
                        .pid = nlk->pid,
                };
                notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
        }

        sock_put(sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
        s32 pid = current->pid;
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid) {
                        /* Bind collision, search negative pid values. */
                        pid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
                        goto retry;
                }
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, pid);
        if (err == -EADDRINUSE)
                goto retry;
        return 0;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
               capable(CAP_NET_ADMIN);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        int addr_len)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
                return -EPERM;

        if (nlk->pid) {
                if (nladdr->nl_pid != nlk->pid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && !nlk->groups)
                return 0;

        netlink_table_grab();
        if (nlk->groups && !nladdr->nl_groups)
                __sk_del_bind_node(sk);
        else if (!nlk->groups && nladdr->nl_groups)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->groups = nladdr->nl_groups;
        netlink_table_ungrab();

        return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state = NETLINK_UNCONNECTED;
                nlk->dst_pid = 0;
                nlk->dst_groups = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser is allowed to send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                return -EPERM;

        if (!nlk->pid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state = NETLINK_CONNECTED;
                nlk->dst_pid = nladdr->nl_pid;
                nlk->dst_groups = nladdr->nl_groups;
        }

        return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
                           int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_pid;
                nladdr->nl_groups = nlk->dst_groups;
        } else {
                nladdr->nl_pid = nlk->pid;
                nladdr->nl_groups = nlk->groups;
        }
        return 0;
}

static void netlink_overrun(struct sock *sk)
{
        if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);
        }
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
        int protocol = ssk->sk_protocol;
        struct sock *sock;
        struct netlink_sock *nlk;

        sock = netlink_lookup(protocol, pid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if ((nlk->pid == 0 && !nlk->data_ready) ||
            (sock->sk_state == NETLINK_CONNECTED &&
             nlk->dst_pid != nlk_sk(ssk)->pid)) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
        return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct sock *sock;

        if (!S_ISSOCK(inode->i_mode))
                return ERR_PTR(-ENOTSOCK);

        sock = SOCKET_I(inode)->sk;
        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);

        sock_hold(sock);
        return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
                      long timeo)
{
        struct netlink_sock *nlk;

        nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
            test_bit(0, &nlk->state)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!timeo) {
                        if (!nlk->pid)
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(0, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(timeo);
                }
                return 1;
        }
        skb_set_owner_r(skb, sk);
        return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
        struct netlink_sock *nlk;
        int len = skb->len;

        nlk = nlk_sk(sk);

        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
        sock_put(sk);
        return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        sock_put(sk);
}
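
/*
 * A sketch of how callers outside this file (the SysV mqueue notification
 * code, for instance) are expected to combine the helpers above; the
 * variable names are illustrative:
 *
 *      sk = netlink_getsockbyfilp(filp);
 *      ...
 *      err = netlink_attachskb(sk, skb, nonblock, timeo);
 *      if (err == 1)
 *              goto retry;     (reference was dropped; repeat the lookup)
 *      if (err == 0)
 *              netlink_sendskb(sk, skb, protocol);     (consumes skb + ref)
 *      ...and netlink_detachskb(sk, skb) on an abort path.
 */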

static inline struct sk_buff *netlink_trim(struct sk_buff *skb, int allocation)
{
        int delta;

        skb_orphan(skb);

        delta = skb->end - skb->tail;
        if (delta * 2 < skb->truesize)
                return skb;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, allocation);
                if (!nskb)
                        return skb;
                kfree_skb(skb);
                skb = nskb;
        }

        if (!pskb_expand_head(skb, 0, -delta, allocation))
                skb->truesize -= delta;

        return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                    int nonblock)
{
        struct sock *sk;
        int err;
        long timeo;

        skb = netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
retry:
        sk = netlink_getsockbypid(ssk, pid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        err = netlink_attachskb(sk, skb, nonblock, timeo);
        if (err == 1)
                goto retry;
        if (err)
                return err;

        return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
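
/*
 * Typical use from kernel code (a hedged sketch; nl_sk, seq, msg_type,
 * payload and size are assumed to exist in the caller):
 *
 *      skb = alloc_skb(NLMSG_SPACE(size), GFP_KERNEL);
 *      nlh = __nlmsg_put(skb, 0, seq, msg_type, size);
 *      memcpy(NLMSG_DATA(nlh), payload, size);
 *      netlink_unicast(nl_sk, skb, dst_pid, MSG_DONTWAIT);
 *
 * netlink_unicast() always consumes the skb (queued on success, freed on
 * failure), so the caller must not touch it afterwards.
 */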

static __inline__ int netlink_broadcast_deliver(struct sock *sk,
                                                struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
                return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
        }
        return -1;
}

struct netlink_broadcast_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int failure;
        int congested;
        int delivered;
        int allocation;
        struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;

        if (p->exclude_sk == sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & p->group))
                goto out;

        if (p->failure) {
                netlink_overrun(sk);
                goto out;
        }

        sock_hold(sk);
        if (p->skb2 == NULL) {
                if (atomic_read(&p->skb->users) != 1) {
                        p->skb2 = skb_clone(p->skb, p->allocation);
                } else {
                        p->skb2 = p->skb;
                        atomic_inc(&p->skb->users);
                }
        }
        if (p->skb2 == NULL) {
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
        } else {
                p->congested |= val;
                p->delivered = 1;
                p->skb2 = NULL;
        }
        sock_put(sk);

out:
        return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                      u32 group, int allocation)
{
        struct netlink_broadcast_data info;
        struct hlist_node *node;
        struct sock *sk;

        skb = netlink_trim(skb, allocation);

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.failure = 0;
        info.congested = 0;
        info.delivered = 0;
        info.allocation = allocation;
        info.skb = skb;
        info.skb2 = NULL;

        /* While we sleep in clone, do not allow the socket list to change */

        netlink_lock_table();

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_broadcast(sk, &info);

        netlink_unlock_table();

        if (info.skb2)
                kfree_skb(info.skb2);
        kfree_skb(skb);

        if (info.delivered) {
                if (info.congested && (allocation & __GFP_WAIT))
                        yield();
                return 0;
        }
        if (info.failure)
                return -ENOBUFS;
        return -ESRCH;
}
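
/*
 * A hedged sketch of the usual multicast pattern (cf. rtnetlink; nl_sk
 * and the message construction are assumed to exist in the caller):
 *
 *      NETLINK_CB(skb).dst_groups = group;
 *      netlink_broadcast(nl_sk, skb, 0, group, GFP_KERNEL);
 *
 * pid 0 means no user socket is excluded by pid; -ESRCH only means that
 * nobody was subscribed to the group, which callers usually ignore.
 */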

struct netlink_set_err_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int code;
};

static inline int do_one_set_err(struct sock *sk,
                                 struct netlink_set_err_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (sk == p->exclude_sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & p->group))
                goto out;

        sk->sk_err = p->code;
        sk->sk_error_report(sk);
out:
        return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
        struct netlink_set_err_data info;
        struct hlist_node *node;
        struct sock *sk;

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.code = code;

        read_lock(&nl_table_lock);

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_set_err(sk, &info);

        read_unlock(&nl_table_lock);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!skb_queue_len(&sk->sk_receive_queue))
                clear_bit(0, &nlk->state);
        if (!test_bit(0, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
        u32 dst_pid;
        u32 dst_groups;
        struct sk_buff *skb;
        int err;
        struct scm_cookie scm;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (NULL == siocb->scm)
                siocb->scm = &scm;
        err = scm_send(sock, msg, siocb->scm);
        if (err < 0)
                return err;

        if (msg->msg_namelen) {
                if (addr->nl_family != AF_NETLINK)
                        return -EINVAL;
                dst_pid = addr->nl_pid;
                dst_groups = addr->nl_groups;
                if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                        return -EPERM;
        } else {
                dst_pid = nlk->dst_pid;
                dst_groups = nlk->dst_groups;
        }

        if (!nlk->pid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
        }

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;
        err = -ENOBUFS;
        skb = alloc_skb(len, GFP_KERNEL);
        if (skb == NULL)
                goto out;

        NETLINK_CB(skb).pid = nlk->pid;
        NETLINK_CB(skb).groups = nlk->groups;
        NETLINK_CB(skb).dst_pid = dst_pid;
        NETLINK_CB(skb).dst_groups = dst_groups;
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

        /* What can I do? Netlink is asynchronous, so we have to save the
           current capabilities here and check them when this message is
           delivered to the corresponding kernel module. --ANK (980802)
         */

        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                goto out;
        }

        err = security_netlink_send(sk, skb);
        if (err) {
                kfree_skb(skb);
                goto out;
        }

        if (dst_groups) {
                atomic_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
        return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
                           int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags & MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb;
        int err;

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                goto out;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb->h.raw = skb->data;
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad = 0;
                addr->nl_pid = NETLINK_CB(skb).pid;
                addr->nl_groups = NETLINK_CB(skb).dst_groups;
                msg->msg_namelen = sizeof(*addr);
        }

        if (NULL == siocb->scm) {
                memset(&scm, 0, sizeof(scm));
                siocb->scm = &scm;
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        skb_free_datagram(sk, skb);

        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
                netlink_dump(sk);

        scm_recv(sock, msg, siocb->scm, flags);

out:
        netlink_rcv_wake(sk);
        return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->data_ready)
                nlk->data_ready(sk, len);
        netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
        struct socket *sock;
        struct sock *sk;

        if (!nl_table)
                return NULL;

        if (unit < 0 || unit >= MAX_LINKS)
                return NULL;

        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;

        if (netlink_create(sock, unit) < 0) {
                sock_release(sock);
                return NULL;
        }
        sk = sock->sk;
        sk->sk_data_ready = netlink_data_ready;
        if (input)
                nlk_sk(sk)->data_ready = input;

        if (netlink_insert(sk, 0)) {
                sock_release(sock);
                return NULL;
        }
        return sk;
}
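
/*
 * A minimal kernel-side user might look like this hedged sketch;
 * NETLINK_TEST, nl_sk and test_input are illustrative names, not
 * definitions from this file:
 *
 *      static void test_input(struct sock *sk, int len)
 *      {
 *              struct sk_buff *skb;
 *
 *              while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *                      struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
 *                      ...validate nlh, handle the request, netlink_ack()...
 *                      kfree_skb(skb);
 *              }
 *      }
 *
 *      nl_sk = netlink_kernel_create(NETLINK_TEST, test_input);
 *
 * The input callback runs in the sender's context via sk_data_ready and
 * therefore should not block for long; heavy work is usually deferred.
 */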

void netlink_set_nonroot(int protocol, unsigned int flags)
{
        if ((unsigned int)protocol < MAX_LINKS)
                nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
        if (cb->skb)
                kfree_skb(cb->skb);
        kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_callback *cb;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        int len;

        skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        spin_lock(&nlk->cb_lock);

        cb = nlk->cb;
        if (cb == NULL) {
                spin_unlock(&nlk->cb_lock);
                kfree_skb(skb);
                return -EINVAL;
        }

        len = cb->dump(skb, cb);

        if (len > 0) {
                spin_unlock(&nlk->cb_lock);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, len);
                return 0;
        }

        nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
                          NLMSG_DONE, sizeof(int));
        nlh->nlmsg_flags |= NLM_F_MULTI;
        memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, skb->len);

        cb->done(cb);
        nlk->cb = NULL;
        spin_unlock(&nlk->cb_lock);

        netlink_destroy_callback(cb);
        return 0;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       struct nlmsghdr *nlh,
                       int (*dump)(struct sk_buff *skb,
                                   struct netlink_callback *),
                       int (*done)(struct netlink_callback *))
{
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
                return -ENOBUFS;

        memset(cb, 0, sizeof(*cb));
        cb->dump = dump;
        cb->done = done;
        cb->nlh = nlh;
        atomic_inc(&skb->users);
        cb->skb = skb;

        sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
        }
        nlk = nlk_sk(sk);
        /* A dump is in progress... */
        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                spin_unlock(&nlk->cb_lock);
                netlink_destroy_callback(cb);
                sock_put(sk);
                return -EBUSY;
        }
        nlk->cb = cb;
        spin_unlock(&nlk->cb_lock);

        netlink_dump(sk);
        sock_put(sk);
        return 0;
}
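
/*
 * A hedged example of the dump pattern (modelled on the rtnetlink
 * handlers; my_dump_fn and my_done_fn are illustrative names): a request
 * handler starts the dump, and netlink_recvmsg() refills the socket as
 * the reader drains it:
 *
 *      if (nlh->nlmsg_flags & NLM_F_DUMP)
 *              return netlink_dump_start(nl_sk, skb, nlh,
 *                                        my_dump_fn, my_done_fn);
 *
 * my_dump_fn() returns a positive count while it has more to send,
 * recording its position in cb->args[], and 0 when finished, which
 * triggers the NLMSG_DONE message built in netlink_dump().
 */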

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
        struct sk_buff *skb;
        struct nlmsghdr *rep;
        struct nlmsgerr *errmsg;
        int size;

        if (err == 0)
                size = NLMSG_SPACE(sizeof(struct nlmsgerr));
        else
                size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb) {
                struct sock *sk;

                sk = netlink_lookup(in_skb->sk->sk_protocol,
                                    NETLINK_CB(in_skb).pid);
                if (sk) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                        sock_put(sk);
                }
                return;
        }

        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                          NLMSG_ERROR, sizeof(struct nlmsgerr));
        errmsg = NLMSG_DATA(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
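
/*
 * Note on the sizing above: a success ack (err == 0) carries only the
 * request's nlmsghdr inside the nlmsgerr, while a failure echoes the
 * whole request so userspace can match and inspect it. A protocol
 * handler typically calls this as (sketch):
 *
 *      if (nlh->nlmsg_flags & NLM_F_ACK)
 *              netlink_ack(skb, nlh, err);
 */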


#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
        int link;
        int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
        struct nl_seq_iter *iter = seq->private;
        int i, j;
        struct sock *s;
        struct hlist_node *node;
        loff_t off = 0;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (j = 0; j <= hash->mask; j++) {
                        sk_for_each(s, node, &hash->table[j]) {
                                if (off == pos) {
                                        iter->link = i;
                                        iter->hash_idx = j;
                                        return s;
                                }
                                ++off;
                        }
                }
        }
        return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock(&nl_table_lock);
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *s;
        struct nl_seq_iter *iter;
        int i, j;

        ++*pos;

        if (v == SEQ_START_TOKEN)
                return netlink_seq_socket_idx(seq, 0);

        s = sk_next(v);
        if (s)
                return s;

        iter = seq->private;
        i = iter->link;
        j = iter->hash_idx + 1;

        do {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
                        if (s) {
                                iter->link = i;
                                iter->hash_idx = j;
                                return s;
                        }
                }

                j = 0;
        } while (++i < MAX_LINKS);

        return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                         "sk       Eth Pid    Groups   "
                         "Rmem     Wmem     Dump     Locks\n");
        else {
                struct sock *s = v;
                struct netlink_sock *nlk = nlk_sk(s);

                seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
                           s,
                           s->sk_protocol,
                           nlk->pid,
                           nlk->groups,
                           atomic_read(&s->sk_rmem_alloc),
                           atomic_read(&s->sk_wmem_alloc),
                           nlk->cb,
                           atomic_read(&s->sk_refcnt)
                        );

        }
        return 0;
}

static struct seq_operations netlink_seq_ops = {
        .start = netlink_seq_start,
        .next  = netlink_seq_next,
        .stop  = netlink_seq_stop,
        .show  = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct nl_seq_iter *iter;
        int err;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        err = seq_open(file, &netlink_seq_ops);
        if (err) {
                kfree(iter);
                return err;
        }

        memset(iter, 0, sizeof(*iter));
        seq = file->private_data;
        seq->private = iter;
        return 0;
}

static struct file_operations netlink_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = netlink_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netlink_chain, nb);
}

static struct proto_ops netlink_ops = {
        .family =     PF_NETLINK,
        .owner =      THIS_MODULE,
        .release =    netlink_release,
        .bind =       netlink_bind,
        .connect =    netlink_connect,
        .socketpair = sock_no_socketpair,
        .accept =     sock_no_accept,
        .getname =    netlink_getname,
        .poll =       datagram_poll,
        .ioctl =      sock_no_ioctl,
        .listen =     sock_no_listen,
        .shutdown =   sock_no_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg =    netlink_sendmsg,
        .recvmsg =    netlink_recvmsg,
        .mmap =       sock_no_mmap,
        .sendpage =   sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
        .family = PF_NETLINK,
        .create = netlink_create,
        .owner  = THIS_MODULE,  /* for consistency 8) */
};

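/*
 * Deliberately never defined anywhere: if struct netlink_skb_parms ever
 * outgrows skb->cb, the call in netlink_proto_init() below survives
 * compilation and the build fails at link time with an undefined
 * reference.
 */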
extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
        struct sk_buff *dummy_skb;
        int i;
        unsigned long max;
        unsigned int order;
        int err = proto_register(&netlink_proto, 0);

        if (err != 0)
                goto out;

        if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
                netlink_skb_parms_too_large();

        nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
        if (!nl_table) {
enomem:
                printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
                return -ENOMEM;
        }

        memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

        if (num_physpages >= (128 * 1024))
                max = num_physpages >> (21 - PAGE_SHIFT);
        else
                max = num_physpages >> (23 - PAGE_SHIFT);

        order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
        max = (1UL << order) / sizeof(struct hlist_head);
        order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
                                nl_pid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto enomem;
                }
                memset(hash->table, 0, 1 * sizeof(*hash->table));
                hash->max_shift = order;
                hash->shift = 0;
                hash->mask = 0;
                hash->rehash_time = jiffies;
        }

        sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
        proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
        /* The netlink device handler may be needed early. */
        rtnetlink_init();
out:
        return err;
}

static void __exit netlink_proto_exit(void)
{
        sock_unregister(PF_NETLINK);
        proc_net_remove("netlink");
        kfree(nl_table);
        nl_table = NULL;
        proto_unregister(&netlink_proto);
}

core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);