/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
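
/*
 * Worked example (illustrative note, not in the original source): on a
 * 64-bit machine, NLGRPSZ(32) = ALIGN(32, 64) / 8 = 8 bytes, i.e. one
 * unsigned long, so NLGRPLONGS(32) = 1. A family with 100 groups needs
 * ALIGN(100, 64) / 8 = 16 bytes, i.e. NLGRPLONGS(100) = 2 longs.
 */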
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*netlink_rcv)(struct sk_buff *skb);
	struct module		*module;
};

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};
#define NETLINK_KERNEL_SOCKET		0x1
#define NETLINK_RECV_PKTINFO		0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS		0x8

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
struct nl_pid_hash {
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		mask;
	unsigned int		shift;

	unsigned int		entries;
	unsigned int		max_shift;

	u32			rnd;
};
struct netlink_table {
	struct nl_pid_hash	hash;
	struct hlist_head	mc_list;
	struct listeners __rcu	*listeners;
	unsigned int		nl_nonroot;
	unsigned int		groups;
	struct mutex		*cb_mutex;
	struct module		*module;
	int			registered;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
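
/*
 * Worked example (illustrative note, not in the original source):
 * netlink multicast groups are numbered from 1, so
 * netlink_group_mask(1) == 0x1, netlink_group_mask(3) == 0x4, and
 * netlink_group_mask(0) == 0 ("no group"). This matches the bitmap
 * layout of nladdr->nl_groups for the first 32 groups.
 */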
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}
/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
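
/*
 * Worked example (illustrative note, not in the original source): with
 * shift = 4 (16 buckets) and 40 entries, avg = 40 >> 4 = 2 > 1, so the
 * table is grown before the pending insert. Once growth is capped at
 * max_shift, a same-size rehash with a fresh hash->rnd is done instead,
 * at most every 10 minutes, to break up a pathological chain (len > avg).
 */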
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			kfree(nl_table[sk->sk_protocol].listeners);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}
static inline int netlink_capable(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_pid != nlk_sk(ssk)->pid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		skb_set_owner_r(skb, sk);
		nlk->netlink_rcv(skb);
	}
	kfree_skb(skb);
	sock_put(sk);
	return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
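
/*
 * Usage sketch (illustrative, not part of the original file): a kernel
 * sender replies to the pid recorded in the request's control block.
 * my_reply_fill() is a hypothetical message builder.
 *
 *	static void my_reply(struct sock *nlsk, struct sk_buff *req)
 *	{
 *		struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *
 *		if (!skb)
 *			return;
 *		my_reply_fill(skb);	// hypothetical
 *		netlink_unicast(nlsk, skb, NETLINK_CB(req).pid, MSG_DONTWAIT);
 *	}
 */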
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
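
/*
 * Usage sketch (illustrative): callers typically use this to skip
 * building an expensive notification when nobody is subscribed.
 * MY_GRP and my_send_notification() are hypothetical.
 *
 *	if (netlink_has_listeners(nlsk, MY_GRP))
 *		my_send_notification(nlsk, MY_GRP);
 *
 * The check is racy by design; a listener may join right after it.
 */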
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 pid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};
static int do_one_broadcast(struct sock *sk,
			    struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	} else
		consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
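
/*
 * Usage sketch (illustrative): broadcasting a prebuilt notification skb
 * to group MY_GRP without excluding any pid. MY_GRP is hypothetical.
 *
 *	err = netlink_broadcast(nlsk, skb, 0, MY_GRP, GFP_KERNEL);
 *	if (err && err != -ESRCH)	// -ESRCH: nobody listening
 *		pr_warn("notification dropped\n");
 */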
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @pid: the PID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(0, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
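
/*
 * Userspace counterpart (illustrative sketch): joining a multicast group
 * above 32 goes through NETLINK_ADD_MEMBERSHIP rather than the nl_groups
 * bitmap passed to bind(). MY_GRP is hypothetical.
 *
 *	int grp = MY_GRP;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */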
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
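
/*
 * Userspace counterpart (illustrative sketch): with NETLINK_PKTINFO
 * enabled, the destination group arrives as a control message:
 *
 *	struct nl_pktinfo *pi = NULL;
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_PKTINFO)
 *			pi = (struct nl_pktinfo *)CMSG_DATA(cmsg);
 */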
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;

	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			goto out;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(siocb->scm);
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	msg->msg_namelen = 0;

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */
struct sock *
netlink_kernel_create(struct net *net, int unit, unsigned int groups,
		      void (*input)(struct sk_buff *skb),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->netlink_rcv = input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(netlink_kernel_create);
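
/*
 * Usage sketch (illustrative, not part of this file): a subsystem
 * creates its kernel-side socket at init time. NETLINK_TEST and
 * my_input() are hypothetical names.
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);	// my_rcv_msg hypothetical
 *	}
 *
 *	nlsk = netlink_kernel_create(&init_net, NETLINK_TEST, 0,
 *				     my_input, NULL, THIS_MODULE);
 *	if (!nlsk)
 *		return -ENOMEM;
 */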
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = rcu_dereference_protected(tbl->listeners, 1);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	netlink_table_grab();
	__netlink_clear_multicast_users(ksk, group);
	netlink_table_ungrab();
}
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
EXPORT_SYMBOL(netlink_set_nonroot);
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_LENGTH(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = pid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
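
/*
 * Worked example (illustrative note, not in the original source): for a
 * 4-byte payload, size = NLMSG_LENGTH(4) = NLMSG_HDRLEN + 4 = 20, and
 * NLMSG_ALIGN(20) = 20, so no padding needs zeroing. For a 5-byte
 * payload, size = 21 and NLMSG_ALIGN(21) = 24, so the final 3 padding
 * bytes after the payload are cleared by the memset above.
 */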
/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
	if (!skb)
		goto errout_skb;

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else {
			skb_queue_tail(&sk->sk_receive_queue, skb);
			sk->sk_data_ready(sk, skb->len);
		}
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else {
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
	}

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       const struct nlmsghdr *nlh,
		       struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->min_dump_alloc = control->min_dump_alloc;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);

	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(netlink_dump_start);
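
/*
 * Usage sketch (illustrative): a request handler kicks off a dump by
 * filling in a netlink_dump_control; my_dump() and my_done() are
 * hypothetical callbacks matching cb->dump/cb->done above.
 *
 *	{
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 */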
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
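
/*
 * Usage sketch (illustrative): the per-message callback passed to
 * netlink_rcv_skb() validates and dispatches one request; my_rcv_msg()
 * and my_handle() are hypothetical.
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!capable(CAP_NET_ADMIN))	// typical permission check
 *			return -EPERM;
 *		return my_handle(nlh);		// hypothetical dispatcher
 *	}
 */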
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, pid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
				sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "netlink");
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;

	netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (totalram_pages >= (128 * 1024))
		limit = totalram_pages >> (21 - PAGE_SHIFT);
	else
		limit = totalram_pages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);