/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>
struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock sk;
        u32 pid;
        unsigned int groups;
        u32 dst_pid;
        u32 dst_group;
        unsigned long state;
        wait_queue_head_t wait;
        struct netlink_callback *cb;
        spinlock_t cb_lock;
        void (*data_ready)(struct sock *sk, int bytes);
        struct module *module;
        u32 flags;
};

#define NETLINK_KERNEL_SOCKET	0x1
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
        return (struct netlink_sock *)sk;
}
struct nl_pid_hash {
        struct hlist_head *table;
        unsigned long rehash_time;

        unsigned int mask;
        unsigned int shift;

        unsigned int entries;
        unsigned int max_shift;

        u32 rnd;
};
struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
        unsigned int nl_nonroot;
        struct module *module;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}
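
/*
 * Illustrative values (not from this file): multicast group n occupies
 * bit (n - 1) of the 32-bit mask, so
 *
 *	netlink_group_mask(1)  == 0x00000001
 *	netlink_group_mask(32) == 0x80000000
 *	netlink_group_mask(0)  == 0x00000000	(no group)
 */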
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Freeing alive netlink socket %p\n", sk);
                return;
        }
        BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(!nlk_sk(sk)->cb);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_bh(&nl_table_lock);
                        schedule();
                        write_lock_bh(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

static __inline__ void netlink_table_ungrab(void)
{
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);
}
static __inline__ void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}
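
/*
 * Usage sketch (illustrative): writers that modify nl_table pair
 * netlink_table_grab()/netlink_table_ungrab() and never sleep with the
 * table held; readers that may sleep (e.g. across request_module()) pair
 * netlink_lock_table()/netlink_unlock_table(), which only pins the table
 * via nl_table_users:
 *
 *	netlink_table_grab();
 *	nl_table[unit].module = module;		(exclusive writer)
 *	netlink_table_ungrab();
 */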
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;

        read_lock(&nl_table_lock);
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(sk, node, head) {
                if (nlk_sk(sk)->pid == pid) {
                        sock_hold(sk);
                        goto found;
                }
        }
        sk = NULL;
found:
        read_unlock(&nl_table_lock);
        return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_ATOMIC);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct hlist_head *otable, *table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_pid_hash_alloc(size);
        if (!table)
                return 0;

        memset(table, 0, size);
        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct hlist_node *node, *tmp;

                sk_for_each_safe(sk, node, tmp, &otable[i])
                        __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
        }

        nl_pid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_pid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}
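
/*
 * Illustrative numbers: with shift == 4 (16 buckets) and 40 entries,
 * avg == 40 >> 4 == 2 > 1, so the table grows (doubles); otherwise a
 * single unusually long chain (len > avg) only triggers a same-size
 * re-seed with a fresh hash->rnd, and at most once per 10 minutes
 * (rehash_time).
 */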
static struct proto_ops netlink_ops;
static int netlink_insert(struct sock *sk, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
        struct hlist_node *node;
        int len;

        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        len = 0;
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid)
                        break;
                len++;
        }
        if (node)
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->pid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;

        if (len && nl_pid_hash_dilute(hash, len))
                head = nl_pid_hashfn(hash, pid);
        hash->entries++;
        nlk_sk(sk)->pid = pid;
        sk_add_node(sk, head);
        err = 0;

err:
        netlink_table_ungrab();
        return err;
}
static void netlink_remove(struct sock *sk)
{
        netlink_table_grab();
        if (sk_del_node_init(sk))
                nl_table[sk->sk_protocol].hash.entries--;
        if (nlk_sk(sk)->groups)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
}
static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};
static int netlink_create(struct socket *sock, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;
        struct module *module;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
        if (!nl_table[protocol].hash.entries) {
                /* We do 'best effort'. If we find a matching module,
                 * it is loaded. If not, we don't return an error to
                 * allow pure userspace<->userspace communication. -HW
                 */
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
        module = nl_table[protocol].module;
        if (!try_module_get(module))
                module = NULL;
        netlink_unlock_table();

        sock->ops = &netlink_ops;

        sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
        if (!sk) {
                module_put(module);
                return -ENOMEM;
        }

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        nlk->module = module;
        spin_lock_init(&nlk->cb_lock);
        init_waitqueue_head(&nlk->wait);
        sk->sk_destruct = netlink_sock_destruct;

        sk->sk_protocol = protocol;
        return 0;
}
static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        nlk = nlk_sk(sk);

        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                nlk->cb->done(nlk->cb);
                netlink_destroy_callback(nlk->cb);
                nlk->cb = NULL;
                __sock_put(sk);
        }
        spin_unlock(&nlk->cb_lock);

        /* OK. Socket is unlinked, and, therefore,
           no new packets will arrive */

        sock_orphan(sk);
        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->pid && !nlk->groups) {
                struct netlink_notify n = {
                        .protocol = sk->sk_protocol,
                        .pid = nlk->pid,
                };
                notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        if (nlk->flags & NETLINK_KERNEL_SOCKET) {
                netlink_table_grab();
                nl_table[sk->sk_protocol].module = NULL;
                netlink_table_ungrab();
        }

        sock_put(sk);
        return 0;
}
static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
        s32 pid = current->pid;
        int err;
        static s32 rover = -4097;

retry:
        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid) {
                        /* Bind collision, search negative pid values. */
                        pid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
                        goto retry;
                }
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, pid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
               capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
                return -EPERM;

        if (nlk->pid) {
                if (nladdr->nl_pid != nlk->pid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && !nlk->groups)
                return 0;

        netlink_table_grab();
        if (nlk->groups && !nladdr->nl_groups)
                __sk_del_bind_node(sk);
        else if (!nlk->groups && nladdr->nl_groups)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->groups = nladdr->nl_groups;
        netlink_table_ungrab();

        return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state = NETLINK_UNCONNECTED;
                nlk->dst_pid = 0;
                nlk->dst_group = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser is allowed to send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                return -EPERM;

        if (!nlk->pid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state = NETLINK_CONNECTED;
                nlk->dst_pid = nladdr->nl_pid;
                nlk->dst_group = ffs(nladdr->nl_groups);
        }

        return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_pid;
                nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
        } else {
                nladdr->nl_pid = nlk->pid;
                nladdr->nl_groups = nlk->groups;
        }
        return 0;
}
static void netlink_overrun(struct sock *sk)
{
        if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);
        }
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
        int protocol = ssk->sk_protocol;
        struct sock *sock;
        struct netlink_sock *nlk;

        sock = netlink_lookup(protocol, pid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if ((nlk->pid == 0 && !nlk->data_ready) ||
            (sock->sk_state == NETLINK_CONNECTED &&
             nlk->dst_pid != nlk_sk(ssk)->pid)) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
        return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct sock *sock;

        if (!S_ISSOCK(inode->i_mode))
                return ERR_PTR(-ENOTSOCK);

        sock = SOCKET_I(inode)->sk;
        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);

        sock_hold(sock);
        return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
        struct netlink_sock *nlk;

        nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
            test_bit(0, &nlk->state)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!timeo) {
                        if (!nlk->pid)
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(0, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(timeo);
                }
                return 1;
        }
        skb_set_owner_r(skb, sk);
        return 0;
}
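
/*
 * Caller pattern (illustrative): a return of 1 means the socket reference
 * was dropped while sleeping, so the caller must repeat the lookup before
 * retrying, exactly as netlink_unicast() below does:
 *
 *	retry:
 *		sk = netlink_getsockbypid(ssk, pid);
 *		...
 *		err = netlink_attachskb(sk, skb, nonblock, timeo);
 *		if (err == 1)
 *			goto retry;
 */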
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int len = skb->len;

        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
        sock_put(sk);
        return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        sock_put(sk);
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
                                           unsigned int __nocast allocation)
{
        int delta;

        skb_orphan(skb);

        delta = skb->end - skb->tail;
        if (delta * 2 < skb->truesize)
                return skb;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, allocation);
                if (!nskb)
                        return skb;
                kfree_skb(skb);
                skb = nskb;
        }

        if (!pskb_expand_head(skb, 0, -delta, allocation))
                skb->truesize -= delta;

        return skb;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
        struct sock *sk;
        int err;
        long timeo;

        skb = netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
retry:
        sk = netlink_getsockbypid(ssk, pid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        err = netlink_attachskb(sk, skb, nonblock, timeo);
        if (err == 1)
                goto retry;
        if (err)
                return err;

        return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
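
/*
 * Usage sketch (illustrative; nl_sk and reply_skb are hypothetical): a
 * kernel-side socket created with netlink_kernel_create() answering the
 * process that sent in_skb, without blocking:
 *
 *	netlink_unicast(nl_sk, reply_skb, NETLINK_CB(in_skb).pid,
 *			MSG_DONTWAIT);
 */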
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
                return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
        }
        return -1;
}
struct netlink_broadcast_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int failure;
        int congested;
        int delivered;
        unsigned int allocation;
        struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;

        if (p->exclude_sk == sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & netlink_group_mask(p->group)))
                goto out;

        if (p->failure) {
                netlink_overrun(sk);
                goto out;
        }

        sock_hold(sk);
        if (p->skb2 == NULL) {
                if (skb_shared(p->skb)) {
                        p->skb2 = skb_clone(p->skb, p->allocation);
                } else {
                        p->skb2 = skb_get(p->skb);
                        /*
                         * skb ownership may have been set when
                         * delivered to a previous socket.
                         */
                        skb_orphan(p->skb2);
                }
        }
        if (p->skb2 == NULL) {
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
        } else {
                p->congested |= val;
                p->delivered = 1;
                p->skb2 = NULL;
        }
        sock_put(sk);

out:
        return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                      u32 group, int allocation)
{
        struct netlink_broadcast_data info;
        struct hlist_node *node;
        struct sock *sk;

        skb = netlink_trim(skb, allocation);

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.failure = 0;
        info.congested = 0;
        info.delivered = 0;
        info.allocation = allocation;
        info.skb = skb;
        info.skb2 = NULL;

        /* While we sleep in clone, do not allow the socket list to change */

        netlink_lock_table();

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_broadcast(sk, &info);

        kfree_skb(skb);

        netlink_unlock_table();

        if (info.skb2)
                kfree_skb(info.skb2);

        if (info.delivered) {
                if (info.congested && (allocation & __GFP_WAIT))
                        yield();
                return 0;
        }
        if (info.failure)
                return -ENOBUFS;
        return -ESRCH;
}
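
/*
 * Usage sketch (illustrative; MYGRP is a hypothetical 1-based group
 * number, not a bitmask): deliver skb to every socket bound to that
 * multicast group of nl_sk's protocol; pid 0 excludes no listener:
 *
 *	netlink_broadcast(nl_sk, skb, 0, MYGRP, GFP_KERNEL);
 */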
struct netlink_set_err_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int code;
};

static inline int do_one_set_err(struct sock *sk,
                                 struct netlink_set_err_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (sk == p->exclude_sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & netlink_group_mask(p->group)))
                goto out;

        sk->sk_err = p->code;
        sk->sk_error_report(sk);
out:
        return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
        struct netlink_set_err_data info;
        struct hlist_node *node;
        struct sock *sk;

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.code = code;

        read_lock(&nl_table_lock);

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_set_err(sk, &info);

        read_unlock(&nl_table_lock);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(0, &nlk->state);
        if (!test_bit(0, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
        u32 dst_pid;
        u32 dst_group;
        struct sk_buff *skb;
        int err;
        struct scm_cookie scm;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (NULL == siocb->scm)
                siocb->scm = &scm;
        err = scm_send(sock, msg, siocb->scm);
        if (err < 0)
                return err;

        if (msg->msg_namelen) {
                if (addr->nl_family != AF_NETLINK)
                        return -EINVAL;
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
                        return -EPERM;
        } else {
                dst_pid = nlk->dst_pid;
                dst_group = nlk->dst_group;
        }

        if (!nlk->pid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
        }

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;
        err = -ENOBUFS;
        skb = alloc_skb(len, GFP_KERNEL);
        if (skb == NULL)
                goto out;

        NETLINK_CB(skb).pid = nlk->pid;
        NETLINK_CB(skb).dst_pid = dst_pid;
        NETLINK_CB(skb).dst_group = dst_group;
        NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

        /* What can I do? Netlink is asynchronous, so we have to save the
           current capabilities here and check them when this message is
           delivered to the corresponding kernel module.   --ANK (980802)
         */

        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                goto out;
        }

        err = security_netlink_send(sk, skb);
        if (err) {
                kfree_skb(skb);
                goto out;
        }

        if (dst_group) {
                atomic_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
        return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
                           int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags & MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb;
        int err;

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                goto out;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb->h.raw = skb->data;
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad = 0;
                addr->nl_pid = NETLINK_CB(skb).pid;
                addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
                msg->msg_namelen = sizeof(*addr);
        }

        if (NULL == siocb->scm) {
                memset(&scm, 0, sizeof(scm));
                siocb->scm = &scm;
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        skb_free_datagram(sk, skb);

        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
                netlink_dump(sk);

        scm_recv(sock, msg, siocb->scm, flags);

out:
        netlink_rcv_wake(sk);
        return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->data_ready)
                nlk->data_ready(sk, len);
        netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len), struct module *module)
{
        struct socket *sock;
        struct sock *sk;
        struct netlink_sock *nlk;

        if (!nl_table)
                return NULL;

        if (unit < 0 || unit >= MAX_LINKS)
                return NULL;

        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;

        if (netlink_create(sock, unit) < 0)
                goto out_sock_release;

        sk = sock->sk;
        sk->sk_data_ready = netlink_data_ready;
        if (input)
                nlk_sk(sk)->data_ready = input;

        if (netlink_insert(sk, 0))
                goto out_sock_release;

        nlk = nlk_sk(sk);
        nlk->flags |= NETLINK_KERNEL_SOCKET;

        netlink_table_grab();
        nl_table[unit].module = module;
        netlink_table_ungrab();

        return sk;

out_sock_release:
        sock_release(sock);
        return NULL;
}
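
/*
 * Usage sketch (illustrative; NETLINK_TEST, test_rcv and nl_sk are
 * hypothetical): a subsystem creating its kernel-side socket and
 * draining requests in the input callback:
 *
 *	static void test_rcv(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			... parse the nlmsghdr at skb->data, reply or ack ...
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	nl_sk = netlink_kernel_create(NETLINK_TEST, test_rcv, THIS_MODULE);
 *	if (nl_sk == NULL)
 *		return -ENOMEM;
 */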
void netlink_set_nonroot(int protocol, unsigned int flags)
{
        if ((unsigned int)protocol < MAX_LINKS)
                nl_table[protocol].nl_nonroot = flags;
}
static void netlink_destroy_callback(struct netlink_callback *cb)
{
        if (cb->skb)
                kfree_skb(cb->skb);
        kfree(cb);
}
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_callback *cb;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        int len;

        skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        spin_lock(&nlk->cb_lock);

        cb = nlk->cb;
        if (cb == NULL) {
                spin_unlock(&nlk->cb_lock);
                kfree_skb(skb);
                return -EINVAL;
        }

        len = cb->dump(skb, cb);

        if (len > 0) {
                spin_unlock(&nlk->cb_lock);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, len);
                return 0;
        }

        nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
        memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, skb->len);

        if (cb->done)
                cb->done(cb);
        nlk->cb = NULL;
        spin_unlock(&nlk->cb_lock);

        netlink_destroy_callback(cb);
        __sock_put(sk);
        return 0;

nlmsg_failure:
        return -ENOBUFS;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       struct nlmsghdr *nlh,
                       int (*dump)(struct sk_buff *skb, struct netlink_callback*),
                       int (*done)(struct netlink_callback*))
{
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
                return -ENOBUFS;

        memset(cb, 0, sizeof(*cb));
        cb->dump = dump;
        cb->done = done;
        cb->nlh = nlh;
        atomic_inc(&skb->users);
        cb->skb = skb;

        sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
        }
        nlk = nlk_sk(sk);
        /* A dump is in progress... */
        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                spin_unlock(&nlk->cb_lock);
                netlink_destroy_callback(cb);
                sock_put(sk);
                return -EBUSY;
        }
        nlk->cb = cb;
        spin_unlock(&nlk->cb_lock);

        netlink_dump(sk);
        sock_put(sk);
        return 0;
}
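
/*
 * Usage sketch (illustrative, modeled on rtnetlink-style handlers; my_dump
 * and my_done are hypothetical): start a multi-part dump for a request
 * carrying NLM_F_DUMP:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP)
 *		return netlink_dump_start(nl_sk, skb, nlh,
 *					  my_dump, my_done);
 *
 * my_dump() fills one skb per call and returns skb->len while there is
 * more data, 0 when finished; netlink_dump() then appends NLMSG_DONE.
 */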
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
        struct sk_buff *skb;
        struct nlmsghdr *rep;
        struct nlmsgerr *errmsg;
        int size;

        if (err == 0)
                size = NLMSG_SPACE(sizeof(struct nlmsgerr));
        else
                size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb) {
                struct sock *sk;

                sk = netlink_lookup(in_skb->sk->sk_protocol,
                                    NETLINK_CB(in_skb).pid);
                if (sk) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                        sock_put(sk);
                }
                return;
        }

        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                          NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
        errmsg = NLMSG_DATA(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
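
/*
 * Usage sketch (illustrative): a kernel receive handler acking every
 * message that asks for it; err == 0 produces a plain ACK, a negative
 * errno an error report carrying the offending header:
 *
 *	if (nlh->nlmsg_flags & NLM_F_ACK)
 *		netlink_ack(skb, nlh, err);
 */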
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
        int link;
        int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
        struct nl_seq_iter *iter = seq->private;
        int i, j;
        struct sock *s;
        struct hlist_node *node;
        loff_t off = 0;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (j = 0; j <= hash->mask; j++) {
                        sk_for_each(s, node, &hash->table[j]) {
                                if (off == pos) {
                                        iter->link = i;
                                        iter->hash_idx = j;
                                        return s;
                                }
                                ++off;
                        }
                }
        }
        return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock(&nl_table_lock);
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *s;
        struct nl_seq_iter *iter;
        int i, j;

        ++*pos;

        if (v == SEQ_START_TOKEN)
                return netlink_seq_socket_idx(seq, 0);

        s = sk_next(v);
        if (s)
                return s;

        iter = seq->private;
        i = iter->link;
        j = iter->hash_idx + 1;

        do {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
                        if (s) {
                                iter->link = i;
                                iter->hash_idx = j;
                                return s;
                        }
                }

                j = 0;
        } while (++i < MAX_LINKS);

        return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                         "sk       Eth Pid    Groups   "
                         "Rmem     Wmem     Dump     Locks\n");
        else {
                struct sock *s = v;
                struct netlink_sock *nlk = nlk_sk(s);

                seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
                           s,
                           s->sk_protocol,
                           nlk->pid,
                           nlk->groups,
                           atomic_read(&s->sk_rmem_alloc),
                           atomic_read(&s->sk_wmem_alloc),
                           nlk->cb,
                           atomic_read(&s->sk_refcnt)
                        );
        }
        return 0;
}
static struct seq_operations netlink_seq_ops = {
        .start = netlink_seq_start,
        .next  = netlink_seq_next,
        .stop  = netlink_seq_stop,
        .show  = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct nl_seq_iter *iter;
        int err;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        err = seq_open(file, &netlink_seq_ops);
        if (err) {
                kfree(iter);
                return err;
        }

        memset(iter, 0, sizeof(*iter));
        seq = file->private_data;
        seq->private = iter;
        return 0;
}
static struct file_operations netlink_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = netlink_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netlink_chain, nb);
}
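
/*
 * Usage sketch (illustrative; NETLINK_TEST and the callback name are
 * hypothetical): watch for NETLINK_URELEASE to learn when a bound pid
 * closes its socket:
 *
 *	static int my_netlink_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE && n->protocol == NETLINK_TEST)
 *			... forget state kept for n->pid ...
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netlink_event,
 *	};
 *
 *	netlink_register_notifier(&my_nb);
 */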
static struct proto_ops netlink_ops = {
        .family =       PF_NETLINK,
        .owner =        THIS_MODULE,
        .release =      netlink_release,
        .bind =         netlink_bind,
        .connect =      netlink_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
        .poll =         datagram_poll,
        .ioctl =        sock_no_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      netlink_sendmsg,
        .recvmsg =      netlink_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
        .family = PF_NETLINK,
        .create = netlink_create,
        .owner  = THIS_MODULE,  /* for consistency 8) */
};
extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
        struct sk_buff *dummy_skb;
        int i;
        unsigned long max;
        unsigned int order;
        int err = proto_register(&netlink_proto, 0);

        if (err != 0)
                goto out;

        if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
                netlink_skb_parms_too_large();

        nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
        if (!nl_table) {
enomem:
                printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
                return -ENOMEM;
        }

        memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

        if (num_physpages >= (128 * 1024))
                max = num_physpages >> (21 - PAGE_SHIFT);
        else
                max = num_physpages >> (23 - PAGE_SHIFT);

        order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
        max = (1UL << order) / sizeof(struct hlist_head);
        order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
                                nl_pid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto enomem;
                }
                memset(hash->table, 0, 1 * sizeof(*hash->table));
                hash->max_shift = order;
                hash->shift = 0;
                hash->mask = 0;
                hash->rehash_time = jiffies;
        }

        sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
        proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
        /* The netlink device handler may be needed early. */
        rtnetlink_init();
out:
        return err;
}
core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);