/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 *	Alan Cox	:	Numerous verify_area() problems
 *	Alan Cox	:	Connecting on a connecting socket
 *				now returns an error for tcp.
 *	Alan Cox	:	sock->protocol is set correctly.
 *				and is not sometimes left as 0.
 *	Alan Cox	:	connect handles icmp errors on a
 *				connect properly. Unfortunately there
 *				is a restart syscall nasty there. I
 *				can't match BSD without hacking the C
 *				library. Ideas urgently sought!
 *	Alan Cox	:	Disallow bind() to addresses that are
 *				not ours - especially broadcast ones!!
 *	Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *	Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *				instead they leave that for the DESTROY timer.
 *	Alan Cox	:	Clean up error flag in accept
 *	Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *				was buggy. Put a remove_sock() in the handler
 *				for memory when we hit 0. Also altered the timer
 *				code. The ACK stuff can wait and needs major
 *	Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *				and fixed timer/inet_bh race.
 *	Alan Cox	:	Added zapped flag for TCP
 *	Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *	Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *	Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *	Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *	Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *	Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *	C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink :	identd support
 *	Alan Cox	:	Fixed connect() taking signals I think.
 *	Alan Cox	:	SO_LINGER supported
 *	Alan Cox	:	Error reporting fixes
 *	Anonymous	:	inet_create tidied up (sk->reuse setting)
 *	Alan Cox	:	inet sockets don't set sk->type!
 *	Alan Cox	:	Split socket option code
 *	Alan Cox	:	Callbacks
 *	Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *	Alex		:	Removed restriction on inet fioctl
 *	Alan Cox	:	Splitting INET from NET core
 *	Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *	Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *	Alan Cox	:	Split IP from generic code
 *	Alan Cox	:	New kfree_skbmem()
 *	Alan Cox	:	Make SO_DEBUG superuser only.
 *	Alan Cox	:	Allow anyone to clear SO_DEBUG
 *	Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *	Alan Cox	:	Allocator for a socket is settable.
 *	Alan Cox	:	SO_ERROR includes soft errors.
 *	Alan Cox	:	Allow NULL arguments on some SO_ opts
 *	Alan Cox	:	Generic socket allocation to make hooks
 *				easier (suggested by Craig Metz).
 *	Michael Pall	:	SO_ERROR returns positive errno again
 *	Steve Whitehouse:	Added default destructor to free
 *				protocol private data.
 *	Steve Whitehouse:	Added various other default routines
 *				common to several socket families.
 *	Chris Evans	:	Call suser() check last on F_SETOWN
 *	Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *	Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *	Andi Kleen	:	Fix write_space callback
 *	Chris Evans	:	Security fixes - signedness again
 *	Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
    "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX", "sk_lock-AF_INET",
    "sk_lock-AF_AX25", "sk_lock-AF_IPX", "sk_lock-AF_APPLETALK",
    "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE", "sk_lock-AF_ATMPVC",
    "sk_lock-AF_X25", "sk_lock-AF_INET6", "sk_lock-AF_ROSE",
    "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI", "sk_lock-AF_SECURITY",
    "sk_lock-AF_KEY", "sk_lock-AF_NETLINK", "sk_lock-AF_PACKET",
    "sk_lock-AF_ASH", "sk_lock-AF_ECONET", "sk_lock-AF_ATMSVC",
    "sk_lock-AF_RDS", "sk_lock-AF_SNA", "sk_lock-AF_IRDA",
    "sk_lock-AF_PPPOX", "sk_lock-AF_WANPIPE", "sk_lock-AF_LLC",
    "sk_lock-27", "sk_lock-28", "sk_lock-AF_CAN",
    "sk_lock-AF_TIPC", "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV",
    "sk_lock-AF_RXRPC", "sk_lock-AF_ISDN", "sk_lock-AF_PHONET",
    "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF",
    "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
    "slock-AF_UNSPEC", "slock-AF_UNIX", "slock-AF_INET",
    "slock-AF_AX25", "slock-AF_IPX", "slock-AF_APPLETALK",
    "slock-AF_NETROM", "slock-AF_BRIDGE", "slock-AF_ATMPVC",
    "slock-AF_X25", "slock-AF_INET6", "slock-AF_ROSE",
    "slock-AF_DECnet", "slock-AF_NETBEUI", "slock-AF_SECURITY",
    "slock-AF_KEY", "slock-AF_NETLINK", "slock-AF_PACKET",
    "slock-AF_ASH", "slock-AF_ECONET", "slock-AF_ATMSVC",
    "slock-AF_RDS", "slock-AF_SNA", "slock-AF_IRDA",
    "slock-AF_PPPOX", "slock-AF_WANPIPE", "slock-AF_LLC",
    "slock-27", "slock-28", "slock-AF_CAN",
    "slock-AF_TIPC", "slock-AF_BLUETOOTH", "slock-AF_IUCV",
    "slock-AF_RXRPC", "slock-AF_ISDN", "slock-AF_PHONET",
    "slock-AF_IEEE802154", "slock-AF_CAIF",
    "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
    "clock-AF_UNSPEC", "clock-AF_UNIX", "clock-AF_INET",
    "clock-AF_AX25", "clock-AF_IPX", "clock-AF_APPLETALK",
    "clock-AF_NETROM", "clock-AF_BRIDGE", "clock-AF_ATMPVC",
    "clock-AF_X25", "clock-AF_INET6", "clock-AF_ROSE",
    "clock-AF_DECnet", "clock-AF_NETBEUI", "clock-AF_SECURITY",
    "clock-AF_KEY", "clock-AF_NETLINK", "clock-AF_PACKET",
    "clock-AF_ASH", "clock-AF_ECONET", "clock-AF_ATMSVC",
    "clock-AF_RDS", "clock-AF_SNA", "clock-AF_IRDA",
    "clock-AF_PPPOX", "clock-AF_WANPIPE", "clock-AF_LLC",
    "clock-27", "clock-28", "clock-AF_CAN",
    "clock-AF_TIPC", "clock-AF_BLUETOOTH", "clock-AF_IUCV",
    "clock-AF_RXRPC", "clock-AF_ISDN", "clock-AF_PHONET",
    "clock-AF_IEEE802154", "clock-AF_CAIF",
    "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
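
/*
 * Worked example of the sizing above (illustrative only: the value of
 * sizeof(struct sk_buff) is an assumption here and varies with
 * architecture and config):
 *
 *	sizeof(struct sk_buff)	== 232
 *	_SK_MEM_OVERHEAD	== 232 + 256 == 488
 *	SK_WMEM_MAX		== 488 * 256 == 124928 bytes
 *
 * i.e. the defaults are sized to hold roughly 256 packets' worth of
 * sk_buff overhead plus payload headroom, independent of platform.
 */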

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
    if (optlen < sizeof(tv))
    if (copy_from_user(&tv, optval, sizeof(tv)))
    if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
        static int warned __read_mostly;
        if (warned < 10 && net_ratelimit()) {
            printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
                   "tries to set negative timeout\n",
                   current->comm, task_pid_nr(current));
    *timeo_p = MAX_SCHEDULE_TIMEOUT;
    if (tv.tv_sec == 0 && tv.tv_usec == 0)
    if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
        *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
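
/*
 * Example (user-space sketch): the struct timeval parsed by
 * sock_set_timeout() arrives via setsockopt(), e.g.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("SO_RCVTIMEO");
 *
 * A zero timeout selects MAX_SCHEDULE_TIMEOUT (block forever); a
 * negative tv_sec is clamped the same way, with the rate-limited
 * warning above.
 */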

static void sock_warn_obsolete_bsdism(const char *name)
    static char warncomm[TASK_COMM_LEN];
    if (strcmp(warncomm, current->comm) && warned < 5) {
        strcpy(warncomm, current->comm);
        printk(KERN_WARNING "process `%s' is using obsolete "
               "%s SO_BSDCOMPAT\n", warncomm, name);

static void sock_disable_timestamp(struct sock *sk, int flag)
    if (sock_flag(sk, flag)) {
        sock_reset_flag(sk, flag);
        if (!sock_flag(sk, SOCK_TIMESTAMP) &&
            !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
            net_disable_timestamp();

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
    struct sk_buff_head *list = &sk->sk_receive_queue;

    /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
     * the number of warnings when compiling with -W --ANK
     */
    if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
        (unsigned)sk->sk_rcvbuf) {
        atomic_inc(&sk->sk_drops);

    err = sk_filter(sk, skb);

    if (!sk_rmem_schedule(sk, skb->truesize)) {
        atomic_inc(&sk->sk_drops);

    skb_set_owner_r(skb, sk);

    /* Cache the SKB length before we tack it onto the receive
     * queue. Once it is added it no longer belongs to us and
     * may be freed by other threads of control pulling packets
     * from the queue.
     */

    /* we escape from the rcu protected region, make sure we don't leak
     * a norefcounted dst
     */

    spin_lock_irqsave(&list->lock, flags);
    skb->dropcount = atomic_read(&sk->sk_drops);
    __skb_queue_tail(list, skb);
    spin_unlock_irqrestore(&list->lock, flags);

    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk, skb_len);
EXPORT_SYMBOL(sock_queue_rcv_skb);
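
/*
 * Typical caller (sketch; "myproto" is hypothetical, but compare the
 * real UDP input path): the protocol hands a parsed skb to
 * sock_queue_rcv_skb() and frees it itself on failure.
 *
 *	static int myproto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */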

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
    int rc = NET_RX_SUCCESS;

    if (sk_filter(sk, skb))
        goto discard_and_relse;

    if (sk_rcvqueues_full(sk, skb)) {
        atomic_inc(&sk->sk_drops);
        goto discard_and_relse;

    bh_lock_sock_nested(sk);

    if (!sock_owned_by_user(sk)) {
        /*
         * trylock + unlock semantics:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

        rc = sk_backlog_rcv(sk, skb);

        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
    } else if (sk_add_backlog(sk, skb)) {
        atomic_inc(&sk->sk_drops);
        goto discard_and_relse;
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
    sk_tx_queue_clear(sk);
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
    struct dst_entry *dst = __sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
        sk_tx_queue_clear(sk);
        rcu_assign_pointer(sk->sk_dst_cache, NULL);
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
    struct dst_entry *dst = sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
    int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
    struct net *net = sock_net(sk);
    char devname[IFNAMSIZ];

    if (!capable(CAP_NET_RAW))

    /* Bind this socket to a particular device like "eth0",
     * as specified in the passed interface name. If the
     * name is "" or the option length is zero the socket
     * is not bound.
     */
    if (optlen > IFNAMSIZ - 1)
        optlen = IFNAMSIZ - 1;
    memset(devname, 0, sizeof(devname));

    if (copy_from_user(devname, optval, optlen))

    if (devname[0] != '\0') {
        struct net_device *dev;

        dev = dev_get_by_name_rcu(net, devname);
            index = dev->ifindex;

    sk->sk_bound_dev_if = index;
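
/*
 * Example (user-space sketch) of the option handled above; requires
 * CAP_NET_RAW, and an empty name or zero optlen removes the binding:
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       "eth0", sizeof("eth0")) < 0)
 *		perror("SO_BINDTODEVICE");
 */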

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
    if (valbool)
        sock_set_flag(sk, bit);
    else
        sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, unsigned int optlen)
    struct sock *sk = sock->sk;

    /*
     * Options without arguments
     */

    if (optname == SO_BINDTODEVICE)
        return sock_bindtodevice(sk, optval, optlen);

    if (optlen < sizeof(int))

    if (get_user(val, (int __user *)optval))

    valbool = val ? 1 : 0;

        if (val && !capable(CAP_NET_ADMIN))
        sock_valbool_flag(sk, SOCK_DBG, valbool);
        sk->sk_reuse = valbool;
        sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
        sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
        /* Don't error on this: BSD doesn't, and if you think
         * about it this is right. Otherwise apps have to
         * play 'guess the biggest size' games. RCVBUF/SNDBUF
         * are treated in BSD as hints */
        if (val > sysctl_wmem_max)
            val = sysctl_wmem_max;
        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        if ((val * 2) < SOCK_MIN_SNDBUF)
            sk->sk_sndbuf = SOCK_MIN_SNDBUF;
            sk->sk_sndbuf = val * 2;

        /*
         * Wake up sending tasks if we
         * upped the value.
         */
        sk->sk_write_space(sk);

        if (!capable(CAP_NET_ADMIN)) {

        /* Don't error on this: BSD doesn't, and if you think
         * about it this is right. Otherwise apps have to
         * play 'guess the biggest size' games. RCVBUF/SNDBUF
         * are treated in BSD as hints */
        if (val > sysctl_rmem_max)
            val = sysctl_rmem_max;
        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        /*
         * We double it on the way in to account for
         * "struct sk_buff" etc. overhead. Applications
         * assume that the SO_RCVBUF setting they make will
         * allow that much actual data to be received on that
         * socket.
         *
         * Applications are unaware that "struct sk_buff" and
         * other overheads allocate from the receive buffer
         * during socket buffer allocation.
         *
         * And after considering the possible alternatives,
         * returning the value we actually used in getsockopt
         * is the most desirable behavior.
         */
        if ((val * 2) < SOCK_MIN_RCVBUF)
            sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
            sk->sk_rcvbuf = val * 2;

        if (!capable(CAP_NET_ADMIN)) {

        if (sk->sk_protocol == IPPROTO_TCP)
            tcp_set_keepalive(sk, valbool);
        sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);

        sock_valbool_flag(sk, SOCK_URGINLINE, valbool);

        sk->sk_no_check = valbool;

        if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
            sk->sk_priority = val;

        if (optlen < sizeof(ling)) {
            ret = -EINVAL;	/* 1003.1g */
        if (copy_from_user(&ling, optval, sizeof(ling))) {
            sock_reset_flag(sk, SOCK_LINGER);
#if (BITS_PER_LONG == 32)
            if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
                sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
                sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
            sock_set_flag(sk, SOCK_LINGER);

        sock_warn_obsolete_bsdism("setsockopt");

            set_bit(SOCK_PASSCRED, &sock->flags);
            clear_bit(SOCK_PASSCRED, &sock->flags);

        if (optname == SO_TIMESTAMP)
            sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
            sock_set_flag(sk, SOCK_RCVTSTAMPNS);
        sock_set_flag(sk, SOCK_RCVTSTAMP);
        sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        sock_reset_flag(sk, SOCK_RCVTSTAMP);
        sock_reset_flag(sk, SOCK_RCVTSTAMPNS);

    case SO_TIMESTAMPING:
        if (val & ~SOF_TIMESTAMPING_MASK) {
        sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
                          val & SOF_TIMESTAMPING_TX_HARDWARE);
        sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
                          val & SOF_TIMESTAMPING_TX_SOFTWARE);
        sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
                          val & SOF_TIMESTAMPING_RX_HARDWARE);
        if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
            sock_enable_timestamp(sk,
                                  SOCK_TIMESTAMPING_RX_SOFTWARE);
            sock_disable_timestamp(sk,
                                   SOCK_TIMESTAMPING_RX_SOFTWARE);
        sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
                          val & SOF_TIMESTAMPING_SOFTWARE);
        sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
                          val & SOF_TIMESTAMPING_SYS_HARDWARE);
        sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
                          val & SOF_TIMESTAMPING_RAW_HARDWARE);

        sk->sk_rcvlowat = val ? : 1;

        ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);

        ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);

    case SO_ATTACH_FILTER:
        if (optlen == sizeof(struct sock_fprog)) {
            struct sock_fprog fprog;

            if (copy_from_user(&fprog, optval, sizeof(fprog)))

            ret = sk_attach_filter(&fprog, sk);

    case SO_DETACH_FILTER:
        ret = sk_detach_filter(sk);

            set_bit(SOCK_PASSSEC, &sock->flags);
            clear_bit(SOCK_PASSSEC, &sock->flags);
        if (!capable(CAP_NET_ADMIN))

        /* We implement the SO_SNDLOWAT etc to
         * not be settable (1003.1g 5.3) */

            sock_set_flag(sk, SOCK_RXQ_OVFL);
            sock_reset_flag(sk, SOCK_RXQ_OVFL);

EXPORT_SYMBOL(sock_setsockopt);
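
/*
 * Example (user-space sketch) of the SO_RCVBUF doubling described
 * above: the kernel stores twice the requested value to cover
 * struct sk_buff overhead, and getsockopt() reports the stored value:
 *
 *	int req = 65536, got;
 *	socklen_t len = sizeof(got);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *
 * got is now 2 * min(req, sysctl_rmem_max), subject to the
 * SOCK_MIN_RCVBUF floor -- 131072 here if rmem_max permits.
 */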

void cred_to_ucred(struct pid *pid, const struct cred *cred,
    ucred->pid = pid_vnr(pid);
    ucred->uid = ucred->gid = -1;
        struct user_namespace *current_ns = current_user_ns();

        ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
        ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, int __user *optlen)
    struct sock *sk = sock->sk;

    int lv = sizeof(int);

    if (get_user(len, optlen))

    memset(&v, 0, sizeof(v));

        v.val = sock_flag(sk, SOCK_DBG);

        v.val = sock_flag(sk, SOCK_LOCALROUTE);

        v.val = !!sock_flag(sk, SOCK_BROADCAST);

        v.val = sk->sk_sndbuf;

        v.val = sk->sk_rcvbuf;

        v.val = sk->sk_reuse;

        v.val = !!sock_flag(sk, SOCK_KEEPOPEN);

        v.val = sk->sk_protocol;

        v.val = sk->sk_family;

        v.val = -sock_error(sk);
            v.val = xchg(&sk->sk_err_soft, 0);

        v.val = !!sock_flag(sk, SOCK_URGINLINE);

        v.val = sk->sk_no_check;

        v.val = sk->sk_priority;

        v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
        v.ling.l_linger = sk->sk_lingertime / HZ;

        sock_warn_obsolete_bsdism("getsockopt");

        v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
                !sock_flag(sk, SOCK_RCVTSTAMPNS);

        v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);

    case SO_TIMESTAMPING:
        if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
            v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
            v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
            v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
            v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
            v.val |= SOF_TIMESTAMPING_SOFTWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
            v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
        if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
            v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;

        lv = sizeof(struct timeval);
        if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
            v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
            v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;

        lv = sizeof(struct timeval);
        if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
            v.tm.tv_sec = sk->sk_sndtimeo / HZ;
            v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;

        v.val = sk->sk_rcvlowat;

        v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;

        struct ucred peercred;
        if (len > sizeof(peercred))
            len = sizeof(peercred);
        cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
        if (copy_to_user(optval, &peercred, len))

        if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
        if (copy_to_user(optval, address, len))

        /* Dubious BSD thing... Probably nobody even uses it, but
         * the UNIX standard wants it for whatever reason... -DaveM
         */
        v.val = sk->sk_state == TCP_LISTEN;

        v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;

        return security_socket_getpeersec_stream(sock, optval, optlen, len);

        v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);

    if (copy_to_user(optval, &v, len))
    if (put_user(len, optlen))
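
/*
 * Example (user-space sketch): reading back the credentials that
 * cred_to_ucred() fills in, via SO_PEERCRED on a connected AF_UNIX
 * socket:
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n", peer.pid, peer.uid, peer.gid);
 */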

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
    sock_lock_init_class_and_name(sk,
            af_family_slock_key_strings[sk->sk_family],
            af_family_slock_keys + sk->sk_family,
            af_family_key_strings[sk->sk_family],
            af_family_keys + sk->sk_family);

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
#ifdef CONFIG_SECURITY_NETWORK
    void *sptr = nsk->sk_security;

    BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
                 sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
                 sizeof(osk->sk_tx_queue_mapping));
    memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
           osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
    nsk->sk_security = sptr;
    security_sk_clone(osk, nsk);

/*
 * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
    if (offsetof(struct sock, sk_node.next) != 0)
        memset(sk, 0, offsetof(struct sock, sk_node.next));
    memset(&sk->sk_node.pprev, 0,
           size - offsetof(struct sock, sk_node.pprev));

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
    unsigned long nulls1, nulls2;

    nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
    nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
    if (nulls1 > nulls2)
        swap(nulls1, nulls2);

    memset((char *)sk, 0, nulls1);
    memset((char *)sk + nulls1 + sizeof(void *), 0,
           nulls2 - nulls1 - sizeof(void *));
    memset((char *)sk + nulls2 + sizeof(void *), 0,
           size - nulls2 - sizeof(void *));
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
    struct kmem_cache *slab;

        sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
        if (priority & __GFP_ZERO) {
                prot->clear_sk(sk, prot->obj_size);
                sk_prot_clear_nulls(sk, prot->obj_size);
        sk = kmalloc(prot->obj_size, priority);
        kmemcheck_annotate_bitfield(sk, flags);

        if (security_sk_alloc(sk, family, priority))

        if (!try_module_get(prot->owner))
        sk_tx_queue_clear(sk);

    security_sk_free(sk);
        kmem_cache_free(slab, sk);

static void sk_prot_free(struct proto *prot, struct sock *sk)
    struct kmem_cache *slab;
    struct module *owner;

    owner = prot->owner;

    security_sk_free(sk);
        kmem_cache_free(slab, sk);

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
    rcu_read_lock(); /* doing current task, which cannot vanish. */
    classid = task_cls_classid(current);
    if (classid && classid != sk->sk_classid)
        sk->sk_classid = classid;
EXPORT_SYMBOL(sock_update_classid);

/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
    sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
        sk->sk_family = family;
        /*
         * See the comment in struct sock definition to understand
         * why we need sk_prot_creator -acme
         */
        sk->sk_prot = sk->sk_prot_creator = prot;
        sock_net_set(sk, get_net(net));
        atomic_set(&sk->sk_wmem_alloc, 1);

        sock_update_classid(sk);
EXPORT_SYMBOL(sk_alloc);
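
/*
 * Sketch of how a protocol's ->create() hook typically uses sk_alloc()
 * ("myproto" is hypothetical; compare inet_create() for the real
 * pattern; error handling shortened):
 *
 *	static int myproto_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &myproto_prot);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		sk->sk_protocol = protocol;
 *		return 0;
 *	}
 */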

static void __sk_free(struct sock *sk)
    struct sk_filter *filter;

    if (sk->sk_destruct)
        sk->sk_destruct(sk);

    filter = rcu_dereference_check(sk->sk_filter,
                                   atomic_read(&sk->sk_wmem_alloc) == 0);
        sk_filter_uncharge(sk, filter);
        rcu_assign_pointer(sk->sk_filter, NULL);

    sock_disable_timestamp(sk, SOCK_TIMESTAMP);
    sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

    if (atomic_read(&sk->sk_omem_alloc))
        printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
               __func__, atomic_read(&sk->sk_omem_alloc));

    if (sk->sk_peer_cred)
        put_cred(sk->sk_peer_cred);
    put_pid(sk->sk_peer_pid);
    put_net(sock_net(sk));
    sk_prot_free(sk->sk_prot_creator, sk);

void sk_free(struct sock *sk)
    /*
     * We subtract one from sk_wmem_alloc and can tell whether
     * some packets are still in some tx queue.
     * If not null, sock_wfree() will call __sk_free(sk) later
     */
    if (atomic_dec_and_test(&sk->sk_wmem_alloc))
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not allowed.
 * Take a reference to a socket to remove it from the hash _alive_ and after
 * that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
    if (sk == NULL || sk->sk_socket == NULL)

    sock_release(sk->sk_socket);
    release_net(sock_net(sk));
    sock_net_set(sk, get_net(&init_net));
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
    newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
    if (newsk != NULL) {
        struct sk_filter *filter;

        sock_copy(newsk, sk);

        get_net(sock_net(newsk));
        sk_node_init(&newsk->sk_node);
        sock_lock_init(newsk);
        bh_lock_sock(newsk);
        newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
        newsk->sk_backlog.len = 0;

        atomic_set(&newsk->sk_rmem_alloc, 0);
        /*
         * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
         */
        atomic_set(&newsk->sk_wmem_alloc, 1);
        atomic_set(&newsk->sk_omem_alloc, 0);
        skb_queue_head_init(&newsk->sk_receive_queue);
        skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
        skb_queue_head_init(&newsk->sk_async_wait_queue);

        spin_lock_init(&newsk->sk_dst_lock);
        rwlock_init(&newsk->sk_callback_lock);
        lockdep_set_class_and_name(&newsk->sk_callback_lock,
                                   af_callback_keys + newsk->sk_family,
                                   af_family_clock_key_strings[newsk->sk_family]);

        newsk->sk_dst_cache = NULL;
        newsk->sk_wmem_queued = 0;
        newsk->sk_forward_alloc = 0;
        newsk->sk_send_head = NULL;
        newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

        sock_reset_flag(newsk, SOCK_DONE);
        skb_queue_head_init(&newsk->sk_error_queue);

        filter = rcu_dereference_protected(newsk->sk_filter, 1);
            sk_filter_charge(newsk, filter);

        if (unlikely(xfrm_sk_clone_policy(newsk))) {
            /* It is still a raw copy of the parent, so invalidate
             * the destructor and make a plain sk_free() */
            newsk->sk_destruct = NULL;

        newsk->sk_priority = 0;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        atomic_set(&newsk->sk_refcnt, 2);

        /*
         * Increment the counter in the same struct proto as the master
         * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
         * is the same as sk->sk_prot->socks, as this field was copied
         * with sock_copy()).
         *
         * This _changes_ the previous behaviour, where
         * tcp_create_openreq_child always was incrementing the
         * equivalent to tcp_prot->socks (inet_sock_nr), so this has
         * to be taken into account in all callers. -acme
         */
        sk_refcnt_debug_inc(newsk);
        sk_set_socket(newsk, NULL);
        newsk->sk_wq = NULL;

        if (newsk->sk_prot->sockets_allocated)
            percpu_counter_inc(newsk->sk_prot->sockets_allocated);

        if (sock_flag(newsk, SOCK_TIMESTAMP) ||
            sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
            net_enable_timestamp();
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
    __sk_dst_set(sk, dst);
    sk->sk_route_caps = dst->dev->features;
    if (sk->sk_route_caps & NETIF_F_GSO)
        sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
    sk->sk_route_caps &= ~sk->sk_route_nocaps;
    if (sk_can_gso(sk)) {
        if (dst->header_len) {
            sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
            sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
            sk->sk_gso_max_size = dst->dev->gso_max_size;
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
    if (totalram_pages <= 4096) {
        sysctl_wmem_max = 32767;
        sysctl_rmem_max = 32767;
        sysctl_wmem_default = 32767;
        sysctl_rmem_default = 32767;
    } else if (totalram_pages >= 131072) {
        sysctl_wmem_max = 131071;
        sysctl_rmem_max = 131071;

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
    struct sock *sk = skb->sk;
    unsigned int len = skb->truesize;

    if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
        /*
         * Keep a reference on sk_wmem_alloc, this will be released
         * after sk_write_space() call
         */
        atomic_sub(len - 1, &sk->sk_wmem_alloc);
        sk->sk_write_space(sk);
    /*
     * if sk_wmem_alloc reaches 0, we must finish what sk_free()
     * could not do because of in-flight packets
     */
    if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
    struct sock *sk = skb->sk;
    unsigned int len = skb->truesize;

    atomic_sub(len, &sk->sk_rmem_alloc);
    sk_mem_uncharge(sk, len);
EXPORT_SYMBOL(sock_rfree);

int sock_i_uid(struct sock *sk)
    read_lock_bh(&sk->sk_callback_lock);
    uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
    read_unlock_bh(&sk->sk_callback_lock);
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
    read_lock_bh(&sk->sk_callback_lock);
    ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
    read_unlock_bh(&sk->sk_callback_lock);
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
    if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
        struct sk_buff *skb = alloc_skb(size, priority);
            skb_set_owner_w(skb, sk);
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
    if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
        struct sk_buff *skb = alloc_skb(size, priority);
            skb_set_owner_r(skb, sk);

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
    if ((unsigned)size <= sysctl_optmem_max &&
        atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
        /* First do the add, to avoid the race if kmalloc
         * might sleep.
         */
        atomic_add(size, &sk->sk_omem_alloc);
        mem = kmalloc(size, priority);
            atomic_sub(size, &sk->sk_omem_alloc);
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
    atomic_sub(size, &sk->sk_omem_alloc);
EXPORT_SYMBOL(sock_kfree_s);
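
/*
 * Usage sketch for the pair above: option memory is charged to
 * sk->sk_omem_alloc by sock_kmalloc() and must be released with
 * sock_kfree_s() using the same size so the charge balances
 * ("struct my_opt" is hypothetical):
 *
 *	struct my_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */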

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
    clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        if (signal_pending(current))
        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
        if (sk->sk_shutdown & SEND_SHUTDOWN)
        timeo = schedule_timeout(timeo);
    finish_wait(sk_sleep(sk), &wait);

/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
    struct sk_buff *skb;

    gfp_mask = sk->sk_allocation;
    if (gfp_mask & __GFP_WAIT)
        gfp_mask |= __GFP_REPEAT;

    timeo = sock_sndtimeo(sk, noblock);
        err = sock_error(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN)

        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
            skb = alloc_skb(header_len, gfp_mask);

                /* No pages, we're done... */

                npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                skb->truesize += data_len;
                skb_shinfo(skb)->nr_frags = npages;
                for (i = 0; i < npages; i++) {
                    page = alloc_pages(sk->sk_allocation, 0);
                        skb_shinfo(skb)->nr_frags = i;

                    frag = &skb_shinfo(skb)->frags[i];
                    frag->page_offset = 0;
                    frag->size = (data_len >= PAGE_SIZE ?
                    data_len -= PAGE_SIZE;

                /* Full success... */

        set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        if (signal_pending(current))
        timeo = sock_wait_for_wmem(sk, timeo);

    skb_set_owner_w(skb, sk);
    err = sock_intr_errno(timeo);
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
    return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
EXPORT_SYMBOL(sock_alloc_send_skb);
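
/*
 * Sketch of a datagram sendmsg() path built on the helper above
 * (names and sizes are illustrative):
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	... fill in the packet and hand it to the device layer ...
 *
 * The call sleeps (honouring sk_sndtimeo) until sk_wmem_alloc drops
 * below sk_sndbuf; for non-blocking sockets *errcode is -EAGAIN.
 */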

static void __lock_sock(struct sock *sk)
    __releases(&sk->sk_lock.slock)
    __acquires(&sk->sk_lock.slock)
        prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                  TASK_UNINTERRUPTIBLE);
        spin_unlock_bh(&sk->sk_lock.slock);
        spin_lock_bh(&sk->sk_lock.slock);
        if (!sock_owned_by_user(sk))
    finish_wait(&sk->sk_lock.wq, &wait);

static void __release_sock(struct sock *sk)
    __releases(&sk->sk_lock.slock)
    __acquires(&sk->sk_lock.slock)
    struct sk_buff *skb = sk->sk_backlog.head;

        sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

            struct sk_buff *next = skb->next;

            WARN_ON_ONCE(skb_dst_is_noref(skb));
            sk_backlog_rcv(sk, skb);

            /*
             * We are in process context here with softirqs
             * disabled, use cond_resched_softirq() to preempt.
             * This is safe to do because we've taken the backlog
             * queue private:
             */
            cond_resched_softirq();

        } while (skb != NULL);

    } while ((skb = sk->sk_backlog.head) != NULL);

    /*
     * Doing the zeroing here guarantees we cannot loop forever
     * while a wild producer attempts to flood us.
     */
    sk->sk_backlog.len = 0;

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
    set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
    rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
    clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
    finish_wait(sk_sleep(sk), &wait);
EXPORT_SYMBOL(sk_wait_data);
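
/*
 * Typical receive-side loop built on sk_wait_data() (sketch; real
 * callers are protocol recvmsg() implementations):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (!sk_wait_data(sk, &timeo))
 *			break;
 *	}
 *
 * A zero return means the queue is still empty (timeout or signal);
 * the caller then checks signal_pending() and the remaining timeo.
 */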

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
    struct proto *prot = sk->sk_prot;
    int amt = sk_mem_pages(size);

    sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
    allocated = atomic_long_add_return(amt, prot->memory_allocated);

    if (allocated <= prot->sysctl_mem[0]) {
        if (prot->memory_pressure && *prot->memory_pressure)
            *prot->memory_pressure = 0;

    /* Under pressure. */
    if (allocated > prot->sysctl_mem[1])
        if (prot->enter_memory_pressure)
            prot->enter_memory_pressure(sk);

    /* Over hard limit. */
    if (allocated > prot->sysctl_mem[2])
        goto suppress_allocation;

    /* guarantee minimum buffer size under pressure */
    if (kind == SK_MEM_RECV) {
        if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
    } else { /* SK_MEM_SEND */
        if (sk->sk_type == SOCK_STREAM) {
            if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
        } else if (atomic_read(&sk->sk_wmem_alloc) <
                   prot->sysctl_wmem[0])

    if (prot->memory_pressure) {
        if (!*prot->memory_pressure)
        alloc = percpu_counter_read_positive(prot->sockets_allocated);
        if (prot->sysctl_mem[2] > alloc *
            sk_mem_pages(sk->sk_wmem_queued +
                         atomic_read(&sk->sk_rmem_alloc) +
                         sk->sk_forward_alloc))

suppress_allocation:
    if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
        sk_stream_moderate_sndbuf(sk);

        /* Fail only if socket is _under_ its sndbuf.
         * In this case we cannot block, so we have to fail.
         */
        if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)

    /* Alas. Undo changes. */
    sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
    atomic_long_sub(amt, prot->memory_allocated);
EXPORT_SYMBOL(__sk_mem_schedule);
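
/*
 * Worked example of the accounting above (assumes PAGE_SIZE == 4096,
 * which is what SK_MEM_QUANTUM amounts to): charging a 3000-byte skb
 * rounds up to one quantum,
 *
 *	amt = sk_mem_pages(3000) == 1
 *	sk->sk_forward_alloc += 1 * SK_MEM_QUANTUM	(4096 bytes)
 *
 * and later uncharges come out of sk_forward_alloc; __sk_mem_reclaim()
 * below returns only whole quanta to prot->memory_allocated.
 */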

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
    struct proto *prot = sk->sk_prot;

    atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
                    prot->memory_allocated);
    sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

    if (prot->memory_pressure && *prot->memory_pressure &&
        (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
        *prot->memory_pressure = 0;
EXPORT_SYMBOL(__sk_mem_reclaim);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len, int flags)
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
    /* Mirror missing mmap method error code */
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
    struct msghdr msg = {.msg_flags = flags};

    char *kaddr = kmap(page);
    iov.iov_base = kaddr + offset;

    res = kernel_sendmsg(sock, &msg, &iov, 1, size);
EXPORT_SYMBOL(sock_no_sendpage);
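
/*
 * Sketch: a protocol that supports only a subset of operations fills
 * the rest of its proto_ops with these stubs ("myproto" is
 * hypothetical; compare the real tables in net/ipv4/af_inet.c):
 *
 *	static const struct proto_ops myproto_ops = {
 *		.family		= PF_INET,
 *		.owner		= THIS_MODULE,
 *		.bind		= myproto_bind,
 *		.sendmsg	= myproto_sendmsg,
 *		.recvmsg	= myproto_recvmsg,
 *		.accept		= sock_no_accept,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */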

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
    struct socket_wq *wq;

    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
        wake_up_interruptible_all(&wq->wait);

static void sock_def_error_report(struct sock *sk)
    struct socket_wq *wq;

    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
        wake_up_interruptible_poll(&wq->wait, POLLERR);
    sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);

static void sock_def_readable(struct sock *sk, int len)
    struct socket_wq *wq;

    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))
        wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                        POLLRDNORM | POLLRDBAND);
    sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);

static void sock_def_write_space(struct sock *sk)
    struct socket_wq *wq;

    /* Do not wake up a writer until he can make "significant"
     * progress. --DaveM
     */
    if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
            wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                            POLLWRNORM | POLLWRBAND);

        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
            sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

static void sock_def_destruct(struct sock *sk)
    kfree(sk->sk_protinfo);

void sk_send_sigurg(struct sock *sk)
    if (sk->sk_socket && sk->sk_socket->file)
        if (send_sigurg(&sk->sk_socket->file->f_owner))
            sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
    if (!mod_timer(timer, expires))
        sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
    if (timer_pending(timer) && del_timer(timer))
        __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
    skb_queue_head_init(&sk->sk_receive_queue);
    skb_queue_head_init(&sk->sk_write_queue);
    skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
    skb_queue_head_init(&sk->sk_async_wait_queue);

    sk->sk_send_head = NULL;

    init_timer(&sk->sk_timer);

    sk->sk_allocation = GFP_KERNEL;
    sk->sk_rcvbuf = sysctl_rmem_default;
    sk->sk_sndbuf = sysctl_wmem_default;
    sk->sk_state = TCP_CLOSE;
    sk_set_socket(sk, sock);

    sock_set_flag(sk, SOCK_ZAPPED);

        sk->sk_type = sock->type;
        sk->sk_wq = sock->wq;

    spin_lock_init(&sk->sk_dst_lock);
    rwlock_init(&sk->sk_callback_lock);
    lockdep_set_class_and_name(&sk->sk_callback_lock,
                               af_callback_keys + sk->sk_family,
                               af_family_clock_key_strings[sk->sk_family]);

    sk->sk_state_change = sock_def_wakeup;
    sk->sk_data_ready = sock_def_readable;
    sk->sk_write_space = sock_def_write_space;
    sk->sk_error_report = sock_def_error_report;
    sk->sk_destruct = sock_def_destruct;

    sk->sk_sndmsg_page = NULL;
    sk->sk_sndmsg_off = 0;

    sk->sk_peer_pid = NULL;
    sk->sk_peer_cred = NULL;
    sk->sk_write_pending = 0;
    sk->sk_rcvlowat = 1;
    sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
    sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

    sk->sk_stamp = ktime_set(-1L, 0);

    /*
     * Before updating sk_refcnt, we must commit prior changes to memory
     * (Documentation/RCU/rculist_nulls.txt for details)
     */
    atomic_set(&sk->sk_refcnt, 1);
    atomic_set(&sk->sk_drops, 0);
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
    spin_lock_bh(&sk->sk_lock.slock);
    if (sk->sk_lock.owned)
    sk->sk_lock.owned = 1;
    spin_unlock(&sk->sk_lock.slock);
    /*
     * The sk_lock has mutex_lock() semantics here:
     */
    mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
    /*
     * The sk_lock has mutex_unlock() semantics:
     */
    mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

    spin_lock_bh(&sk->sk_lock.slock);
    if (sk->sk_backlog.tail)
    sk->sk_lock.owned = 0;
    if (waitqueue_active(&sk->sk_lock.wq))
        wake_up(&sk->sk_lock.wq);
    spin_unlock_bh(&sk->sk_lock.slock);
EXPORT_SYMBOL(release_sock);
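
/*
 * Canonical locking pattern around the two helpers above (sketch):
 *
 *	lock_sock(sk);
 *	... modify socket state, walk queues ...
 *	release_sock(sk);
 *
 * release_sock() replays, via __release_sock(), whatever softirq
 * context appended to the backlog while the lock was owned, then
 * wakes any waiters on sk_lock.wq.
 */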

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
    spin_lock_bh(&sk->sk_lock.slock);

    if (!sk->sk_lock.owned)
        /*
         * Note : We must disable BH
         */

    sk->sk_lock.owned = 1;
    spin_unlock(&sk->sk_lock.slock);
    /*
     * The sk_lock has mutex_lock() semantics here:
     */
    mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
EXPORT_SYMBOL(lock_sock_fast);
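
/*
 * Pairing sketch for the fast-path lock above; the boolean result is
 * handed back to unlock_sock_fast() so it can undo the matching path:
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short, non-blocking critical section ...
 *	unlock_sock_fast(sk, slow);
 */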

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
    if (!sock_flag(sk, SOCK_TIMESTAMP))
        sock_enable_timestamp(sk, SOCK_TIMESTAMP);
    tv = ktime_to_timeval(sk->sk_stamp);
    if (tv.tv_sec == -1)
    if (tv.tv_sec == 0) {
        sk->sk_stamp = ktime_get_real();
        tv = ktime_to_timeval(sk->sk_stamp);
    return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
    if (!sock_flag(sk, SOCK_TIMESTAMP))
        sock_enable_timestamp(sk, SOCK_TIMESTAMP);
    ts = ktime_to_timespec(sk->sk_stamp);
    if (ts.tv_sec == -1)
    if (ts.tv_sec == 0) {
        sk->sk_stamp = ktime_get_real();
        ts = ktime_to_timespec(sk->sk_stamp);
    return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
EXPORT_SYMBOL(sock_get_timestampns);
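
/*
 * These two helpers back the SIOCGSTAMP/SIOCGSTAMPNS ioctls; from
 * user space (sketch):
 *
 *	struct timeval tv;
 *
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("last packet at %ld.%06ld\n",
 *		       (long)tv.tv_sec, (long)tv.tv_usec);
 */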

void sock_enable_timestamp(struct sock *sk, int flag)
    if (!sock_flag(sk, flag)) {
        sock_set_flag(sk, flag);
        /*
         * we just set one of the two flags which require net
         * time stamping, but time stamping might have been on
         * already because of the other one
         */
                      flag == SOCK_TIMESTAMP ?
                      SOCK_TIMESTAMPING_RX_SOFTWARE :
            net_enable_timestamp();

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
    struct sock *sk = sock->sk;

    return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen)
    struct sock *sk = sock->sk;

    if (sk->sk_prot->compat_getsockopt != NULL)
        return sk->sk_prot->compat_getsockopt(sk, level, optname,
    return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(compat_sock_common_getsockopt);

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                        struct msghdr *msg, size_t size, int flags)
    struct sock *sk = sock->sk;

    err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
                               flags & ~MSG_DONTWAIT, &addr_len);
        msg->msg_namelen = addr_len;
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
    struct sock *sk = sock->sk;

    return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen)
    struct sock *sk = sock->sk;

    if (sk->sk_prot->compat_setsockopt != NULL)
        return sk->sk_prot->compat_setsockopt(sk, level, optname,
    return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(compat_sock_common_setsockopt);

void sk_common_release(struct sock *sk)
    if (sk->sk_prot->destroy)
        sk->sk_prot->destroy(sk);

    /*
     * Observation: when sock_common_release is called, processes have
     * no access to the socket, but the net still does.
     * Step one, detach it from networking:
     *
     * A. Remove from hash tables.
     */

    sk->sk_prot->unhash(sk);

    /*
     * At this point the socket cannot receive new packets, but it is
     * possible that some packets are in flight because some CPU is
     * running the receiver and did the hash table lookup before we
     * unhashed the socket. They will reach the receive queue and will
     * be purged by the socket destructor.
     *
     * Also we still have packets pending on the receive queue and,
     * probably, our own packets waiting in device queues. sock_destroy
     * will drain the receive queue, but transmitted packets will delay
     * socket destruction until the last reference is released.
     */

    xfrm_sk_free_policy(sk);

    sk_refcnt_debug_release(sk);
EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
    int val[PROTO_INUSE_NR];

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
    __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
    int cpu, idx = prot->inuse_idx;

    for_each_possible_cpu(cpu)
        res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

    return res >= 0 ? res : 0;
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
    net->core.inuse = alloc_percpu(struct prot_inuse);
    return net->core.inuse ? 0 : -ENOMEM;

static void __net_exit sock_inuse_exit_net(struct net *net)
    free_percpu(net->core.inuse);

static struct pernet_operations net_inuse_ops = {
    .init = sock_inuse_init_net,
    .exit = sock_inuse_exit_net,

static __init int net_inuse_init(void)
    if (register_pernet_subsys(&net_inuse_ops))
        panic("Cannot initialize net inuse counters");

core_initcall(net_inuse_init);

static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
    __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
    int cpu, idx = prot->inuse_idx;

    for_each_possible_cpu(cpu)
        res += per_cpu(prot_inuse, cpu).val[idx];

    return res >= 0 ? res : 0;
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static void assign_proto_idx(struct proto *prot)
    prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

    if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
        printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");

    set_bit(prot->inuse_idx, proto_inuse_idx);

static void release_proto_idx(struct proto *prot)
    if (prot->inuse_idx != PROTO_INUSE_NR - 1)
        clear_bit(prot->inuse_idx, proto_inuse_idx);

static inline void assign_proto_idx(struct proto *prot)

static inline void release_proto_idx(struct proto *prot)

int proto_register(struct proto *prot, int alloc_slab)
        prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                       SLAB_HWCACHE_ALIGN | prot->slab_flags,

        if (prot->slab == NULL) {
            printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",

        if (prot->rsk_prot != NULL) {
            prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
            if (prot->rsk_prot->slab_name == NULL)
                goto out_free_sock_slab;

            prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
                                                     prot->rsk_prot->obj_size, 0,
                                                     SLAB_HWCACHE_ALIGN, NULL);

            if (prot->rsk_prot->slab == NULL) {
                printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
                goto out_free_request_sock_slab_name;

        if (prot->twsk_prot != NULL) {
            prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

            if (prot->twsk_prot->twsk_slab_name == NULL)
                goto out_free_request_sock_slab;

            prot->twsk_prot->twsk_slab =
                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                  prot->twsk_prot->twsk_obj_size,
                                  SLAB_HWCACHE_ALIGN |

            if (prot->twsk_prot->twsk_slab == NULL)
                goto out_free_timewait_sock_slab_name;

    write_lock(&proto_list_lock);
    list_add(&prot->node, &proto_list);
    assign_proto_idx(prot);
    write_unlock(&proto_list_lock);

out_free_timewait_sock_slab_name:
    kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
    if (prot->rsk_prot && prot->rsk_prot->slab) {
        kmem_cache_destroy(prot->rsk_prot->slab);
        prot->rsk_prot->slab = NULL;
out_free_request_sock_slab_name:
    kfree(prot->rsk_prot->slab_name);
    kmem_cache_destroy(prot->slab);
EXPORT_SYMBOL(proto_register);
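
/*
 * Registration sketch ("myproto" is hypothetical; compare tcp_prot in
 * net/ipv4/tcp_ipv4.c): alloc_slab == 1 asks proto_register() to
 * create the per-protocol kmem cache used by sk_prot_alloc():
 *
 *	static struct proto myproto_prot = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct myproto_sock),
 *	};
 *
 *	static int __init myproto_init(void)
 *	{
 *		return proto_register(&myproto_prot, 1);
 *	}
 */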

void proto_unregister(struct proto *prot)
    write_lock(&proto_list_lock);
    release_proto_idx(prot);
    list_del(&prot->node);
    write_unlock(&proto_list_lock);

    if (prot->slab != NULL) {
        kmem_cache_destroy(prot->slab);

    if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
        kmem_cache_destroy(prot->rsk_prot->slab);
        kfree(prot->rsk_prot->slab_name);
        prot->rsk_prot->slab = NULL;

    if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
        kmem_cache_destroy(prot->twsk_prot->twsk_slab);
        kfree(prot->twsk_prot->twsk_slab_name);
        prot->twsk_prot->twsk_slab = NULL;
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
    __acquires(proto_list_lock)
    read_lock(&proto_list_lock);
    return seq_list_start_head(&proto_list, *pos);

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    return seq_list_next(v, &proto_list, pos);

static void proto_seq_stop(struct seq_file *seq, void *v)
    __releases(proto_list_lock)
    read_unlock(&proto_list_lock);

static char proto_method_implemented(const void *method)
    return method == NULL ? 'n' : 'y';

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
    seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
               "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
               sock_prot_inuse_get(seq_file_net(seq), proto),
               proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
               proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
               proto->slab == NULL ? "no" : "yes",
               module_name(proto->owner),
               proto_method_implemented(proto->close),
               proto_method_implemented(proto->connect),
               proto_method_implemented(proto->disconnect),
               proto_method_implemented(proto->accept),
               proto_method_implemented(proto->ioctl),
               proto_method_implemented(proto->init),
               proto_method_implemented(proto->destroy),
               proto_method_implemented(proto->shutdown),
               proto_method_implemented(proto->setsockopt),
               proto_method_implemented(proto->getsockopt),
               proto_method_implemented(proto->sendmsg),
               proto_method_implemented(proto->recvmsg),
               proto_method_implemented(proto->sendpage),
               proto_method_implemented(proto->bind),
               proto_method_implemented(proto->backlog_rcv),
               proto_method_implemented(proto->hash),
               proto_method_implemented(proto->unhash),
               proto_method_implemented(proto->get_port),
               proto_method_implemented(proto->enter_memory_pressure));

static int proto_seq_show(struct seq_file *seq, void *v)
    if (v == &proto_list)
        seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        proto_seq_printf(seq, list_entry(v, struct proto, node));

static const struct seq_operations proto_seq_ops = {
    .start = proto_seq_start,
    .next  = proto_seq_next,
    .stop  = proto_seq_stop,
    .show  = proto_seq_show,

static int proto_seq_open(struct inode *inode, struct file *file)
    return seq_open_net(inode, file, &proto_seq_ops,
                        sizeof(struct seq_net_private));

static const struct file_operations proto_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = proto_seq_open,
    .llseek  = seq_lseek,
    .release = seq_release_net,

static __net_init int proto_init_net(struct net *net)
    if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))

static __net_exit void proto_exit_net(struct net *net)
    proc_net_remove(net, "protocols");

static __net_initdata struct pernet_operations proto_net_ops = {
    .init = proto_init_net,
    .exit = proto_exit_net,

static int __init proto_init(void)
    return register_pernet_subsys(&proto_net_ops);

subsys_initcall(proto_init);

#endif /* PROC_FS */