2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
42 * Johann Baudy : Added TX RING.
44 * This program is free software; you can redistribute it and/or
45 * modify it under the terms of the GNU General Public License
46 * as published by the Free Software Foundation; either version
47 * 2 of the License, or (at your option) any later version.
51 #include <linux/types.h>
53 #include <linux/capability.h>
54 #include <linux/fcntl.h>
55 #include <linux/socket.h>
57 #include <linux/inet.h>
58 #include <linux/netdevice.h>
59 #include <linux/if_packet.h>
60 #include <linux/wireless.h>
61 #include <linux/kernel.h>
62 #include <linux/kmod.h>
63 #include <linux/slab.h>
64 #include <net/net_namespace.h>
66 #include <net/protocol.h>
67 #include <linux/skbuff.h>
69 #include <linux/errno.h>
70 #include <linux/timer.h>
71 #include <asm/system.h>
72 #include <asm/uaccess.h>
73 #include <asm/ioctls.h>
75 #include <asm/cacheflush.h>
77 #include <linux/proc_fs.h>
78 #include <linux/seq_file.h>
79 #include <linux/poll.h>
80 #include <linux/module.h>
81 #include <linux/init.h>
82 #include <linux/mutex.h>
83 #include <linux/if_vlan.h>
84 #include <linux/virtio_net.h>
85 #include <linux/errqueue.h>
88 #include <net/inet_common.h>
93 - if a device has no dev->hard_header routine, it adds and removes the ll header
94 itself. In this case the ll header is invisible outside the device,
95 but higher levels should still reserve dev->hard_header_len.
96 Some devices are clever enough to reallocate the skb when the header
97 will not fit into the reserved space (tunnels); others are not.
99 - a packet socket receives packets with the ll header already pulled,
100 so SOCK_RAW should push it back.
105 Incoming, dev->hard_header!=NULL
106 mac_header -> ll header
109 Outgoing, dev->hard_header!=NULL
110 mac_header -> ll header
113 Incoming, dev->hard_header==NULL
114 mac_header -> UNKNOWN position. It is very likely that it points to the ll
115 header. PPP does this, which is wrong, because it introduces
116 asymmetry between the rx and tx paths.
119 Outgoing, dev->hard_header==NULL
120 mac_header -> data. ll header is still not built!
124 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
130 dev->hard_header != NULL
131 mac_header -> ll header
134 dev->hard_header == NULL (ll header is added by device, we cannot control it)
138 We should set nh.raw on output to the correct position;
139 the packet classifier depends on it.
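/*
 * Illustrative userspace sketch (not part of this file): the ll header
 * rules above are exactly what distinguish SOCK_RAW from SOCK_DGRAM
 * packet sockets for the user. A minimal sketch, assuming an Ethernet
 * device and omitting error handling:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	SOCK_RAW: reads start at the ll header, and writes must
 *	supply a complete frame:
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	SOCK_DGRAM: the kernel removes the ll header on rx and
 *	builds it on tx from the sockaddr_ll passed to sendto():
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 */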
142 /* Private packet socket structures. */
144 struct packet_mclist {
145 struct packet_mclist *next;
150 unsigned char addr[MAX_ADDR_LEN];
152 /* identical to struct packet_mreq except it has
153 * a longer address field.
155 struct packet_mreq_max {
157 unsigned short mr_type;
158 unsigned short mr_alen;
159 unsigned char mr_address[MAX_ADDR_LEN];
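/*
 * Example (illustrative, not part of this file): userspace passes the
 * shorter struct packet_mreq; the kernel copies it into the larger
 * packet_mreq_max above. Enabling promiscuous mode on one interface:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */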
162 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
163 int closing, int tx_ring);
165 struct packet_ring_buffer {
168 unsigned int frames_per_block;
169 unsigned int frame_size;
170 unsigned int frame_max;
172 unsigned int pg_vec_order;
173 unsigned int pg_vec_pages;
174 unsigned int pg_vec_len;
180 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
182 static void packet_flush_mclist(struct sock *sk);
185 /* struct sock has to be the first member of packet_sock */
187 struct tpacket_stats stats;
188 struct packet_ring_buffer rx_ring;
189 struct packet_ring_buffer tx_ring;
191 spinlock_t bind_lock;
192 struct mutex pg_vec_lock;
193 unsigned int running:1, /* prot_hook is attached*/
197 int ifindex; /* bound device */
199 struct packet_mclist *mclist;
201 enum tpacket_versions tp_version;
202 unsigned int tp_hdrlen;
203 unsigned int tp_reserve;
204 unsigned int tp_loss:1;
205 struct packet_type prot_hook ____cacheline_aligned_in_smp;
208 struct packet_skb_cb {
209 unsigned int origlen;
211 struct sockaddr_pkt pkt;
212 struct sockaddr_ll ll;
216 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
218 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
221 struct tpacket_hdr *h1;
222 struct tpacket2_hdr *h2;
227 switch (po->tp_version) {
229 h.h1->tp_status = status;
230 flush_dcache_page(virt_to_page(&h.h1->tp_status));
233 h.h2->tp_status = status;
234 flush_dcache_page(virt_to_page(&h.h2->tp_status));
237 pr_err("TPACKET version not supported\n");
244 static int __packet_get_status(struct packet_sock *po, void *frame)
247 struct tpacket_hdr *h1;
248 struct tpacket2_hdr *h2;
255 switch (po->tp_version) {
257 flush_dcache_page(virt_to_page(&h.h1->tp_status));
258 return h.h1->tp_status;
260 flush_dcache_page(virt_to_page(&h.h2->tp_status));
261 return h.h2->tp_status;
263 pr_err("TPACKET version not supported\n");
269 static void *packet_lookup_frame(struct packet_sock *po,
270 struct packet_ring_buffer *rb,
271 unsigned int position,
274 unsigned int pg_vec_pos, frame_offset;
276 struct tpacket_hdr *h1;
277 struct tpacket2_hdr *h2;
281 pg_vec_pos = position / rb->frames_per_block;
282 frame_offset = position % rb->frames_per_block;
284 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
286 if (status != __packet_get_status(po, h.raw))
292 static inline void *packet_current_frame(struct packet_sock *po,
293 struct packet_ring_buffer *rb,
296 return packet_lookup_frame(po, rb, rb->head, status);
299 static inline void *packet_previous_frame(struct packet_sock *po,
300 struct packet_ring_buffer *rb,
303 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
304 return packet_lookup_frame(po, rb, previous, status);
307 static inline void packet_increment_head(struct packet_ring_buffer *buff)
309 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
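/*
 * Worked example of the lookup arithmetic above (illustrative): with
 * tp_block_size = 4096 and tp_frame_size = 2048, frames_per_block is 2,
 * so frame number 5 resolves to pg_vec_pos = 5 / 2 = 2 and
 * frame_offset = 5 % 2 = 1, i.e. the frame starts at
 * pg_vec[2] + 1 * 2048.
 */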
312 static inline struct packet_sock *pkt_sk(struct sock *sk)
314 return (struct packet_sock *)sk;
317 static void packet_sock_destruct(struct sock *sk)
319 skb_queue_purge(&sk->sk_error_queue);
321 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
322 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
324 if (!sock_flag(sk, SOCK_DEAD)) {
325 pr_err("Attempt to release alive packet socket: %p\n", sk);
329 sk_refcnt_debug_dec(sk);
333 static const struct proto_ops packet_ops;
335 static const struct proto_ops packet_ops_spkt;
337 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
338 struct packet_type *pt, struct net_device *orig_dev)
341 struct sockaddr_pkt *spkt;
344 * When we registered the protocol we saved the socket in the data
345 * field for just this event.
348 sk = pt->af_packet_priv;
351 * Yank back the headers [hope the device set this
352 * right or kerboom...]
354 * Incoming packets have ll header pulled,
357 * For outgoing ones skb->data == skb_mac_header(skb),
358 * so this procedure is a no-op.
361 if (skb->pkt_type == PACKET_LOOPBACK)
364 if (!net_eq(dev_net(dev), sock_net(sk)))
367 skb = skb_share_check(skb, GFP_ATOMIC);
371 /* drop any routing info */
374 /* drop conntrack reference */
377 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
379 skb_push(skb, skb->data - skb_mac_header(skb));
382 * The SOCK_PACKET socket receives _all_ frames.
385 spkt->spkt_family = dev->type;
386 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
387 spkt->spkt_protocol = skb->protocol;
390 * Charge the memory to the socket. This is done specifically
391 * to prevent sockets from using up all the memory.
394 if (sock_queue_rcv_skb(sk, skb) == 0)
405 * Output a raw packet to a device layer. This bypasses all the other
406 * protocol layers and you must therefore supply it with a complete frame
409 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
410 struct msghdr *msg, size_t len)
412 struct sock *sk = sock->sk;
413 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
414 struct sk_buff *skb = NULL;
415 struct net_device *dev;
420 * Get and verify the address.
424 if (msg->msg_namelen < sizeof(struct sockaddr))
426 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
427 proto = saddr->spkt_protocol;
429 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
432 * Find the device first to size check it
435 saddr->spkt_device[13] = 0;
438 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
444 if (!(dev->flags & IFF_UP))
448 * You may not queue a frame bigger than the MTU. This is the lowest-level
449 * raw protocol and you must do your own fragmentation at this level.
453 if (len > dev->mtu + dev->hard_header_len)
457 size_t reserved = LL_RESERVED_SPACE(dev);
458 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
461 skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
464 /* FIXME: Save some space for broken drivers that write a hard
465 * header at transmission time by themselves. PPP is the notable
466 * one here. This should really be fixed at the driver level.
468 skb_reserve(skb, reserved);
469 skb_reset_network_header(skb);
471 /* Try to align data part correctly */
476 skb_reset_network_header(skb);
478 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
485 skb->protocol = proto;
487 skb->priority = sk->sk_priority;
488 skb->mark = sk->sk_mark;
489 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
504 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
507 struct sk_filter *filter;
510 filter = rcu_dereference_bh(sk->sk_filter);
512 res = sk_run_filter(skb, filter->insns, filter->len);
513 rcu_read_unlock_bh();
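/*
 * Illustrative sketch (not part of this file): the filter consulted by
 * run_filter() above is a classic BPF program attached from userspace;
 * res == 0 drops the packet, and a smaller res truncates the snapshot.
 * A minimal filter accepting only ARP frames:
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 0x0000000c },	ldh [12]   (EtherType)
 *		{ 0x15, 0, 1, 0x00000806 },	jeq #ETH_P_ARP
 *		{ 0x06, 0, 0, 0x0000ffff },	ret #65535 (accept)
 *		{ 0x06, 0, 0, 0x00000000 },	ret #0     (drop)
 *	};
 *	struct sock_fprog prog = { .len = 4, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */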
519 This function does lazy skb cloning in the hope that most packets
520 are discarded by BPF.
522 Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
523 and skb->cb are mangled. It works because (and until) packets
524 falling here are owned by the current CPU. Output packets are cloned
525 by dev_queue_xmit_nit(), input packets are processed by net_bh
526 sequentially, so if we return the skb to its original state on exit,
527 we will not harm anyone.
530 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
531 struct packet_type *pt, struct net_device *orig_dev)
534 struct sockaddr_ll *sll;
535 struct packet_sock *po;
536 u8 *skb_head = skb->data;
537 int skb_len = skb->len;
538 unsigned int snaplen, res;
540 if (skb->pkt_type == PACKET_LOOPBACK)
543 sk = pt->af_packet_priv;
546 if (!net_eq(dev_net(dev), sock_net(sk)))
551 if (dev->header_ops) {
552 /* The device has an explicit notion of ll header,
553 exported to higher levels.
555 Otherwise, the device hides the details of its frame
556 structure, so that the corresponding packet head is
557 never delivered to the user.
559 if (sk->sk_type != SOCK_DGRAM)
560 skb_push(skb, skb->data - skb_mac_header(skb));
561 else if (skb->pkt_type == PACKET_OUTGOING) {
562 /* Special case: outgoing packets have ll header at head */
563 skb_pull(skb, skb_network_offset(skb));
569 res = run_filter(skb, sk, snaplen);
575 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
576 (unsigned)sk->sk_rcvbuf)
579 if (skb_shared(skb)) {
580 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
584 if (skb_head != skb->data) {
585 skb->data = skb_head;
592 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
595 sll = &PACKET_SKB_CB(skb)->sa.ll;
596 sll->sll_family = AF_PACKET;
597 sll->sll_hatype = dev->type;
598 sll->sll_protocol = skb->protocol;
599 sll->sll_pkttype = skb->pkt_type;
600 if (unlikely(po->origdev))
601 sll->sll_ifindex = orig_dev->ifindex;
603 sll->sll_ifindex = dev->ifindex;
605 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
607 PACKET_SKB_CB(skb)->origlen = skb->len;
609 if (pskb_trim(skb, snaplen))
612 skb_set_owner_r(skb, sk);
616 /* drop conntrack reference */
619 spin_lock(&sk->sk_receive_queue.lock);
620 po->stats.tp_packets++;
621 skb->dropcount = atomic_read(&sk->sk_drops);
622 __skb_queue_tail(&sk->sk_receive_queue, skb);
623 spin_unlock(&sk->sk_receive_queue.lock);
624 sk->sk_data_ready(sk, skb->len);
628 po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
631 if (skb_head != skb->data && skb_shared(skb)) {
632 skb->data = skb_head;
640 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
641 struct packet_type *pt, struct net_device *orig_dev)
644 struct packet_sock *po;
645 struct sockaddr_ll *sll;
647 struct tpacket_hdr *h1;
648 struct tpacket2_hdr *h2;
651 u8 *skb_head = skb->data;
652 int skb_len = skb->len;
653 unsigned int snaplen, res;
654 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
655 unsigned short macoff, netoff, hdrlen;
656 struct sk_buff *copy_skb = NULL;
660 if (skb->pkt_type == PACKET_LOOPBACK)
663 sk = pt->af_packet_priv;
666 if (!net_eq(dev_net(dev), sock_net(sk)))
669 if (dev->header_ops) {
670 if (sk->sk_type != SOCK_DGRAM)
671 skb_push(skb, skb->data - skb_mac_header(skb));
672 else if (skb->pkt_type == PACKET_OUTGOING) {
673 /* Special case: outgoing packets have ll header at head */
674 skb_pull(skb, skb_network_offset(skb));
678 if (skb->ip_summed == CHECKSUM_PARTIAL)
679 status |= TP_STATUS_CSUMNOTREADY;
683 res = run_filter(skb, sk, snaplen);
689 if (sk->sk_type == SOCK_DGRAM) {
690 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
693 unsigned maclen = skb_network_offset(skb);
694 netoff = TPACKET_ALIGN(po->tp_hdrlen +
695 (maclen < 16 ? 16 : maclen)) +
697 macoff = netoff - maclen;
700 if (macoff + snaplen > po->rx_ring.frame_size) {
701 if (po->copy_thresh &&
702 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
703 (unsigned)sk->sk_rcvbuf) {
704 if (skb_shared(skb)) {
705 copy_skb = skb_clone(skb, GFP_ATOMIC);
707 copy_skb = skb_get(skb);
708 skb_head = skb->data;
711 skb_set_owner_r(copy_skb, sk);
713 snaplen = po->rx_ring.frame_size - macoff;
714 if ((int)snaplen < 0)
718 spin_lock(&sk->sk_receive_queue.lock);
719 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
722 packet_increment_head(&po->rx_ring);
723 po->stats.tp_packets++;
725 status |= TP_STATUS_COPY;
726 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
728 if (!po->stats.tp_drops)
729 status &= ~TP_STATUS_LOSING;
730 spin_unlock(&sk->sk_receive_queue.lock);
732 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
734 switch (po->tp_version) {
736 h.h1->tp_len = skb->len;
737 h.h1->tp_snaplen = snaplen;
738 h.h1->tp_mac = macoff;
739 h.h1->tp_net = netoff;
740 if (skb->tstamp.tv64)
741 tv = ktime_to_timeval(skb->tstamp);
743 do_gettimeofday(&tv);
744 h.h1->tp_sec = tv.tv_sec;
745 h.h1->tp_usec = tv.tv_usec;
746 hdrlen = sizeof(*h.h1);
749 h.h2->tp_len = skb->len;
750 h.h2->tp_snaplen = snaplen;
751 h.h2->tp_mac = macoff;
752 h.h2->tp_net = netoff;
753 if (skb->tstamp.tv64)
754 ts = ktime_to_timespec(skb->tstamp);
757 h.h2->tp_sec = ts.tv_sec;
758 h.h2->tp_nsec = ts.tv_nsec;
759 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
760 hdrlen = sizeof(*h.h2);
766 sll = h.raw + TPACKET_ALIGN(hdrlen);
767 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
768 sll->sll_family = AF_PACKET;
769 sll->sll_hatype = dev->type;
770 sll->sll_protocol = skb->protocol;
771 sll->sll_pkttype = skb->pkt_type;
772 if (unlikely(po->origdev))
773 sll->sll_ifindex = orig_dev->ifindex;
775 sll->sll_ifindex = dev->ifindex;
777 __packet_set_status(po, h.raw, status);
780 struct page *p_start, *p_end;
781 u8 *h_end = h.raw + macoff + snaplen - 1;
783 p_start = virt_to_page(h.raw);
784 p_end = virt_to_page(h_end);
785 while (p_start <= p_end) {
786 flush_dcache_page(p_start);
791 sk->sk_data_ready(sk, 0);
794 if (skb_head != skb->data && skb_shared(skb)) {
795 skb->data = skb_head;
803 po->stats.tp_drops++;
804 spin_unlock(&sk->sk_receive_queue.lock);
806 sk->sk_data_ready(sk, 0);
811 static void tpacket_destruct_skb(struct sk_buff *skb)
813 struct packet_sock *po = pkt_sk(skb->sk);
818 if (likely(po->tx_ring.pg_vec)) {
819 ph = skb_shinfo(skb)->destructor_arg;
820 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
821 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
822 atomic_dec(&po->tx_ring.pending);
823 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
829 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
830 void *frame, struct net_device *dev, int size_max,
831 __be16 proto, unsigned char *addr)
834 struct tpacket_hdr *h1;
835 struct tpacket2_hdr *h2;
838 int to_write, offset, len, tp_len, nr_frags, len_max;
839 struct socket *sock = po->sk.sk_socket;
846 skb->protocol = proto;
848 skb->priority = po->sk.sk_priority;
849 skb->mark = po->sk.sk_mark;
850 skb_shinfo(skb)->destructor_arg = ph.raw;
852 switch (po->tp_version) {
854 tp_len = ph.h2->tp_len;
857 tp_len = ph.h1->tp_len;
860 if (unlikely(tp_len > size_max)) {
861 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
865 skb_reserve(skb, LL_RESERVED_SPACE(dev));
866 skb_reset_network_header(skb);
868 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
871 if (sock->type == SOCK_DGRAM) {
872 err = dev_hard_header(skb, dev, ntohs(proto), addr,
874 if (unlikely(err < 0))
876 } else if (dev->hard_header_len) {
877 /* net device doesn't like empty head */
878 if (unlikely(tp_len <= dev->hard_header_len)) {
879 pr_err("packet size is too short (%d <= %d)\n",
880 tp_len, dev->hard_header_len);
884 skb_push(skb, dev->hard_header_len);
885 err = skb_store_bits(skb, 0, data,
886 dev->hard_header_len);
890 data += dev->hard_header_len;
891 to_write -= dev->hard_header_len;
895 page = virt_to_page(data);
896 offset = offset_in_page(data);
897 len_max = PAGE_SIZE - offset;
898 len = ((to_write > len_max) ? len_max : to_write);
900 skb->data_len = to_write;
901 skb->len += to_write;
902 skb->truesize += to_write;
903 atomic_add(to_write, &po->sk.sk_wmem_alloc);
905 while (likely(to_write)) {
906 nr_frags = skb_shinfo(skb)->nr_frags;
908 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
909 pr_err("Packet exceed the number of skb frags(%lu)\n",
914 flush_dcache_page(page);
916 skb_fill_page_desc(skb,
918 page++, offset, len);
922 len = ((to_write > len_max) ? len_max : to_write);
928 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
932 struct net_device *dev;
934 int ifindex, err, reserve = 0;
936 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
937 int tp_len, size_max;
942 sock = po->sk.sk_socket;
944 mutex_lock(&po->pg_vec_lock);
948 ifindex = po->ifindex;
953 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
955 if (msg->msg_namelen < (saddr->sll_halen
956 + offsetof(struct sockaddr_ll,
959 ifindex = saddr->sll_ifindex;
960 proto = saddr->sll_protocol;
961 addr = saddr->sll_addr;
964 dev = dev_get_by_index(sock_net(&po->sk), ifindex);
966 if (unlikely(dev == NULL))
969 reserve = dev->hard_header_len;
972 if (unlikely(!(dev->flags & IFF_UP)))
975 size_max = po->tx_ring.frame_size
976 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
978 if (size_max > dev->mtu + reserve)
979 size_max = dev->mtu + reserve;
982 ph = packet_current_frame(po, &po->tx_ring,
983 TP_STATUS_SEND_REQUEST);
985 if (unlikely(ph == NULL)) {
990 status = TP_STATUS_SEND_REQUEST;
991 skb = sock_alloc_send_skb(&po->sk,
992 LL_ALLOCATED_SPACE(dev)
993 + sizeof(struct sockaddr_ll),
996 if (unlikely(skb == NULL))
999 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
1002 if (unlikely(tp_len < 0)) {
1004 __packet_set_status(po, ph,
1005 TP_STATUS_AVAILABLE);
1006 packet_increment_head(&po->tx_ring);
1010 status = TP_STATUS_WRONG_FORMAT;
1016 skb->destructor = tpacket_destruct_skb;
1017 __packet_set_status(po, ph, TP_STATUS_SENDING);
1018 atomic_inc(&po->tx_ring.pending);
1020 status = TP_STATUS_SEND_REQUEST;
1021 err = dev_queue_xmit(skb);
1022 if (unlikely(err > 0)) {
1023 err = net_xmit_errno(err);
1024 if (err && __packet_get_status(po, ph) ==
1025 TP_STATUS_AVAILABLE) {
1026 /* skb was destructed already */
1031 * skb was dropped but not destructed yet;
1032 * let's treat it like congestion or err < 0
1036 packet_increment_head(&po->tx_ring);
1038 } while (likely((ph != NULL) ||
1039 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
1040 (atomic_read(&po->tx_ring.pending))))
1047 __packet_set_status(po, ph, status);
1052 mutex_unlock(&po->pg_vec_lock);
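/*
 * Illustrative sketch (not part of this file) of the tx-ring contract
 * tpacket_snd() implements: userspace fills a frame, flips its status
 * to TP_STATUS_SEND_REQUEST and kicks the kernel with send(); the slot
 * returns to TP_STATUS_AVAILABLE once tpacket_destruct_skb() runs.
 * Minimal sketch, assuming TPACKET_V1, an already-mapped ring and a
 * hypothetical next_tx_slot() helper:
 *
 *	struct tpacket_hdr *hdr = next_tx_slot(ring);
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		memcpy((char *)hdr + TPACKET_HDRLEN -
 *		       sizeof(struct sockaddr_ll), frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);
 *	}
 */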
1056 static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1057 size_t reserve, size_t len,
1058 size_t linear, int noblock,
1061 struct sk_buff *skb;
1063 /* Under a page? Don't bother with paged skb. */
1064 if (prepad + len < PAGE_SIZE || !linear)
1067 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1072 skb_reserve(skb, reserve);
1073 skb_put(skb, linear);
1074 skb->data_len = len - linear;
1075 skb->len += len - linear;
1080 static int packet_snd(struct socket *sock,
1081 struct msghdr *msg, size_t len)
1083 struct sock *sk = sock->sk;
1084 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1085 struct sk_buff *skb;
1086 struct net_device *dev;
1088 unsigned char *addr;
1089 int ifindex, err, reserve = 0;
1090 struct virtio_net_hdr vnet_hdr = { 0 };
1093 struct packet_sock *po = pkt_sk(sk);
1094 unsigned short gso_type = 0;
1097 * Get and verify the address.
1100 if (saddr == NULL) {
1101 ifindex = po->ifindex;
1106 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1108 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
1110 ifindex = saddr->sll_ifindex;
1111 proto = saddr->sll_protocol;
1112 addr = saddr->sll_addr;
1116 dev = dev_get_by_index(sock_net(sk), ifindex);
1120 if (sock->type == SOCK_RAW)
1121 reserve = dev->hard_header_len;
1124 if (!(dev->flags & IFF_UP))
1127 if (po->has_vnet_hdr) {
1128 vnet_hdr_len = sizeof(vnet_hdr);
1131 if (len < vnet_hdr_len)
1134 len -= vnet_hdr_len;
1136 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
1141 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1142 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
1144 vnet_hdr.hdr_len = vnet_hdr.csum_start +
1145 vnet_hdr.csum_offset + 2;
1148 if (vnet_hdr.hdr_len > len)
1151 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1152 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1153 case VIRTIO_NET_HDR_GSO_TCPV4:
1154 gso_type = SKB_GSO_TCPV4;
1156 case VIRTIO_NET_HDR_GSO_TCPV6:
1157 gso_type = SKB_GSO_TCPV6;
1159 case VIRTIO_NET_HDR_GSO_UDP:
1160 gso_type = SKB_GSO_UDP;
1166 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1167 gso_type |= SKB_GSO_TCP_ECN;
1169 if (vnet_hdr.gso_size == 0)
1176 if (!gso_type && (len > dev->mtu+reserve))
1180 skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
1181 LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
1182 msg->msg_flags & MSG_DONTWAIT, &err);
1186 skb_set_network_header(skb, reserve);
1189 if (sock->type == SOCK_DGRAM &&
1190 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
1193 /* Returns -EFAULT on error */
1194 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1197 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
1201 skb->protocol = proto;
1203 skb->priority = sk->sk_priority;
1204 skb->mark = sk->sk_mark;
1206 if (po->has_vnet_hdr) {
1207 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1208 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
1209 vnet_hdr.csum_offset)) {
1215 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
1216 skb_shinfo(skb)->gso_type = gso_type;
1218 /* Header must be checked, and gso_segs computed. */
1219 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1220 skb_shinfo(skb)->gso_segs = 0;
1222 len += vnet_hdr_len;
1229 err = dev_queue_xmit(skb);
1230 if (err > 0 && (err = net_xmit_errno(err)) != 0)
1246 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1247 struct msghdr *msg, size_t len)
1249 struct sock *sk = sock->sk;
1250 struct packet_sock *po = pkt_sk(sk);
1251 if (po->tx_ring.pg_vec)
1252 return tpacket_snd(po, msg);
1254 return packet_snd(sock, msg, len);
1258 * Close a PACKET socket. This is fairly simple. We immediately go
1259 * to 'closed' state and remove our protocol entry in the device list.
1262 static int packet_release(struct socket *sock)
1264 struct sock *sk = sock->sk;
1265 struct packet_sock *po;
1267 struct tpacket_req req;
1275 spin_lock_bh(&net->packet.sklist_lock);
1276 sk_del_node_init_rcu(sk);
1277 sock_prot_inuse_add(net, sk->sk_prot, -1);
1278 spin_unlock_bh(&net->packet.sklist_lock);
1280 spin_lock(&po->bind_lock);
1283 * Remove from protocol table
1287 __dev_remove_pack(&po->prot_hook);
1290 spin_unlock(&po->bind_lock);
1292 packet_flush_mclist(sk);
1294 memset(&req, 0, sizeof(req));
1296 if (po->rx_ring.pg_vec)
1297 packet_set_ring(sk, &req, 1, 0);
1299 if (po->tx_ring.pg_vec)
1300 packet_set_ring(sk, &req, 1, 1);
1304 * Now the socket is dead. No more input will appear.
1311 skb_queue_purge(&sk->sk_receive_queue);
1312 sk_refcnt_debug_release(sk);
1319 * Attach a packet hook.
1322 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
1324 struct packet_sock *po = pkt_sk(sk);
1326 * Detach an existing hook if present.
1331 spin_lock(&po->bind_lock);
1336 spin_unlock(&po->bind_lock);
1337 dev_remove_pack(&po->prot_hook);
1338 spin_lock(&po->bind_lock);
1342 po->prot_hook.type = protocol;
1343 po->prot_hook.dev = dev;
1345 po->ifindex = dev ? dev->ifindex : 0;
1350 if (!dev || (dev->flags & IFF_UP)) {
1351 dev_add_pack(&po->prot_hook);
1355 sk->sk_err = ENETDOWN;
1356 if (!sock_flag(sk, SOCK_DEAD))
1357 sk->sk_error_report(sk);
1361 spin_unlock(&po->bind_lock);
1367 * Bind a packet socket to a device
1370 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
1373 struct sock *sk = sock->sk;
1375 struct net_device *dev;
1382 if (addr_len != sizeof(struct sockaddr))
1384 strlcpy(name, uaddr->sa_data, sizeof(name));
1386 dev = dev_get_by_name(sock_net(sk), name);
1388 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1394 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1396 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
1397 struct sock *sk = sock->sk;
1398 struct net_device *dev = NULL;
1406 if (addr_len < sizeof(struct sockaddr_ll))
1408 if (sll->sll_family != AF_PACKET)
1411 if (sll->sll_ifindex) {
1413 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
1417 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
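/*
 * Illustrative sketch (not part of this file): binding a packet socket
 * to one interface and protocol from userspace, which lands in
 * packet_bind() above. Minimal sketch, error handling omitted:
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */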
1425 static struct proto packet_proto = {
1427 .owner = THIS_MODULE,
1428 .obj_size = sizeof(struct packet_sock),
1432 * Create a packet of type SOCK_PACKET.
1435 static int packet_create(struct net *net, struct socket *sock, int protocol,
1439 struct packet_sock *po;
1440 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1443 if (!capable(CAP_NET_RAW))
1445 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
1446 sock->type != SOCK_PACKET)
1447 return -ESOCKTNOSUPPORT;
1449 sock->state = SS_UNCONNECTED;
1452 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
1456 sock->ops = &packet_ops;
1457 if (sock->type == SOCK_PACKET)
1458 sock->ops = &packet_ops_spkt;
1460 sock_init_data(sock, sk);
1463 sk->sk_family = PF_PACKET;
1466 sk->sk_destruct = packet_sock_destruct;
1467 sk_refcnt_debug_inc(sk);
1470 * Attach a protocol block
1473 spin_lock_init(&po->bind_lock);
1474 mutex_init(&po->pg_vec_lock);
1475 po->prot_hook.func = packet_rcv;
1477 if (sock->type == SOCK_PACKET)
1478 po->prot_hook.func = packet_rcv_spkt;
1480 po->prot_hook.af_packet_priv = sk;
1483 po->prot_hook.type = proto;
1484 dev_add_pack(&po->prot_hook);
1489 spin_lock_bh(&net->packet.sklist_lock);
1490 sk_add_node_rcu(sk, &net->packet.sklist);
1491 sock_prot_inuse_add(net, &packet_proto, 1);
1492 spin_unlock_bh(&net->packet.sklist_lock);
1499 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
1501 struct sock_exterr_skb *serr;
1502 struct sk_buff *skb, *skb2;
1506 skb = skb_dequeue(&sk->sk_error_queue);
1512 msg->msg_flags |= MSG_TRUNC;
1515 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1519 sock_recv_timestamp(msg, sk, skb);
1521 serr = SKB_EXT_ERR(skb);
1522 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
1523 sizeof(serr->ee), &serr->ee);
1525 msg->msg_flags |= MSG_ERRQUEUE;
1528 /* Reset and regenerate socket error */
1529 spin_lock_bh(&sk->sk_error_queue.lock);
1531 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
1532 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
1533 spin_unlock_bh(&sk->sk_error_queue.lock);
1534 sk->sk_error_report(sk);
1536 spin_unlock_bh(&sk->sk_error_queue.lock);
1545 * Pull a packet from our receive queue and hand it to the user.
1546 * If necessary we block.
1549 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1550 struct msghdr *msg, size_t len, int flags)
1552 struct sock *sk = sock->sk;
1553 struct sk_buff *skb;
1555 struct sockaddr_ll *sll;
1556 int vnet_hdr_len = 0;
1559 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1563 /* What error should we return now? EUNATTACH? */
1564 if (pkt_sk(sk)->ifindex < 0)
1568 if (flags & MSG_ERRQUEUE) {
1569 err = packet_recv_error(sk, msg, len);
1574 * Call the generic datagram receiver. This handles all sorts
1575 * of horrible races and re-entrancy so we can forget about it
1576 * in the protocol layers.
1578 * Now it will return ENETDOWN if the device has just gone down,
1579 * but then it will block.
1582 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1585 * An error occurred, so return it. Because skb_recv_datagram()
1586 * handles the blocking, we don't need to worry about retries here.
1593 if (pkt_sk(sk)->has_vnet_hdr) {
1594 struct virtio_net_hdr vnet_hdr = { 0 };
1597 vnet_hdr_len = sizeof(vnet_hdr);
1598 if ((len -= vnet_hdr_len) < 0)
1601 if (skb_is_gso(skb)) {
1602 struct skb_shared_info *sinfo = skb_shinfo(skb);
1604 /* This is a hint as to how much should be linear. */
1605 vnet_hdr.hdr_len = skb_headlen(skb);
1606 vnet_hdr.gso_size = sinfo->gso_size;
1607 if (sinfo->gso_type & SKB_GSO_TCPV4)
1608 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1609 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1610 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1611 else if (sinfo->gso_type & SKB_GSO_UDP)
1612 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1613 else if (sinfo->gso_type & SKB_GSO_FCOE)
1617 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1618 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1620 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1622 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1623 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1624 vnet_hdr.csum_start = skb->csum_start -
1626 vnet_hdr.csum_offset = skb->csum_offset;
1627 } /* else everything is zero */
1629 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
1636 * If the address length field is there to be filled in, we fill it in now.
1640 sll = &PACKET_SKB_CB(skb)->sa.ll;
1641 if (sock->type == SOCK_PACKET)
1642 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1644 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
1647 * You lose any data beyond the buffer you gave. If it worries a
1648 * user program, it can ask the device for its MTU anyway.
1654 msg->msg_flags |= MSG_TRUNC;
1657 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1661 sock_recv_ts_and_drops(msg, sk, skb);
1664 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
1667 if (pkt_sk(sk)->auxdata) {
1668 struct tpacket_auxdata aux;
1670 aux.tp_status = TP_STATUS_USER;
1671 if (skb->ip_summed == CHECKSUM_PARTIAL)
1672 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
1673 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
1674 aux.tp_snaplen = skb->len;
1676 aux.tp_net = skb_network_offset(skb);
1677 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
1679 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1683 * Free or return the buffer as appropriate. Again this
1684 * hides all the races and re-entrancy issues from us.
1686 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1689 skb_free_datagram(sk, skb);
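/*
 * Illustrative sketch (not part of this file): retrieving the
 * PACKET_AUXDATA control message emitted by packet_recvmsg() above.
 * Minimal sketch, assuming PACKET_AUXDATA was enabled via setsockopt
 * and that msg_iov is set up for the packet data:
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct msghdr msg = {
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(c);
 *			use aux->tp_snaplen, aux->tp_vlan_tci, ...
 *		}
 */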
1694 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1695 int *uaddr_len, int peer)
1697 struct net_device *dev;
1698 struct sock *sk = sock->sk;
1703 uaddr->sa_family = AF_PACKET;
1705 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1707 strlcpy(uaddr->sa_data, dev->name, 15);
1709 memset(uaddr->sa_data, 0, 14);
1711 *uaddr_len = sizeof(*uaddr);
1716 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1717 int *uaddr_len, int peer)
1719 struct net_device *dev;
1720 struct sock *sk = sock->sk;
1721 struct packet_sock *po = pkt_sk(sk);
1722 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1727 sll->sll_family = AF_PACKET;
1728 sll->sll_ifindex = po->ifindex;
1729 sll->sll_protocol = po->num;
1731 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1733 sll->sll_hatype = dev->type;
1734 sll->sll_halen = dev->addr_len;
1735 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1737 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1741 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1746 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1750 case PACKET_MR_MULTICAST:
1751 if (i->alen != dev->addr_len)
1754 return dev_mc_add(dev, i->addr);
1756 return dev_mc_del(dev, i->addr);
1758 case PACKET_MR_PROMISC:
1759 return dev_set_promiscuity(dev, what);
1761 case PACKET_MR_ALLMULTI:
1762 return dev_set_allmulti(dev, what);
1764 case PACKET_MR_UNICAST:
1765 if (i->alen != dev->addr_len)
1768 return dev_uc_add(dev, i->addr);
1770 return dev_uc_del(dev, i->addr);
1778 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
1780 for ( ; i; i = i->next) {
1781 if (i->ifindex == dev->ifindex)
1782 packet_dev_mc(dev, i, what);
1786 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1788 struct packet_sock *po = pkt_sk(sk);
1789 struct packet_mclist *ml, *i;
1790 struct net_device *dev;
1796 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1801 if (mreq->mr_alen > dev->addr_len)
1805 i = kmalloc(sizeof(*i), GFP_KERNEL);
1810 for (ml = po->mclist; ml; ml = ml->next) {
1811 if (ml->ifindex == mreq->mr_ifindex &&
1812 ml->type == mreq->mr_type &&
1813 ml->alen == mreq->mr_alen &&
1814 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1816 /* Free the new element ... */
1822 i->type = mreq->mr_type;
1823 i->ifindex = mreq->mr_ifindex;
1824 i->alen = mreq->mr_alen;
1825 memcpy(i->addr, mreq->mr_address, i->alen);
1827 i->next = po->mclist;
1829 err = packet_dev_mc(dev, i, 1);
1831 po->mclist = i->next;
1840 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1842 struct packet_mclist *ml, **mlp;
1846 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1847 if (ml->ifindex == mreq->mr_ifindex &&
1848 ml->type == mreq->mr_type &&
1849 ml->alen == mreq->mr_alen &&
1850 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1851 if (--ml->count == 0) {
1852 struct net_device *dev;
1854 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
1856 packet_dev_mc(dev, ml, -1);
1864 return -EADDRNOTAVAIL;
1867 static void packet_flush_mclist(struct sock *sk)
1869 struct packet_sock *po = pkt_sk(sk);
1870 struct packet_mclist *ml;
1876 while ((ml = po->mclist) != NULL) {
1877 struct net_device *dev;
1879 po->mclist = ml->next;
1880 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
1882 packet_dev_mc(dev, ml, -1);
1889 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1891 struct sock *sk = sock->sk;
1892 struct packet_sock *po = pkt_sk(sk);
1895 if (level != SOL_PACKET)
1896 return -ENOPROTOOPT;
1899 case PACKET_ADD_MEMBERSHIP:
1900 case PACKET_DROP_MEMBERSHIP:
1902 struct packet_mreq_max mreq;
1904 memset(&mreq, 0, sizeof(mreq));
1905 if (len < sizeof(struct packet_mreq))
1907 if (len > sizeof(mreq))
1909 if (copy_from_user(&mreq, optval, len))
1911 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
1913 if (optname == PACKET_ADD_MEMBERSHIP)
1914 ret = packet_mc_add(sk, &mreq);
1916 ret = packet_mc_drop(sk, &mreq);
1920 case PACKET_RX_RING:
1921 case PACKET_TX_RING:
1923 struct tpacket_req req;
1925 if (optlen < sizeof(req))
1927 if (pkt_sk(sk)->has_vnet_hdr)
1929 if (copy_from_user(&req, optval, sizeof(req)))
1931 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
1933 case PACKET_COPY_THRESH:
1937 if (optlen != sizeof(val))
1939 if (copy_from_user(&val, optval, sizeof(val)))
1942 pkt_sk(sk)->copy_thresh = val;
1945 case PACKET_VERSION:
1949 if (optlen != sizeof(val))
1951 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1953 if (copy_from_user(&val, optval, sizeof(val)))
1958 po->tp_version = val;
1964 case PACKET_RESERVE:
1968 if (optlen != sizeof(val))
1970 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1972 if (copy_from_user(&val, optval, sizeof(val)))
1974 po->tp_reserve = val;
1981 if (optlen != sizeof(val))
1983 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1985 if (copy_from_user(&val, optval, sizeof(val)))
1987 po->tp_loss = !!val;
1990 case PACKET_AUXDATA:
1994 if (optlen < sizeof(val))
1996 if (copy_from_user(&val, optval, sizeof(val)))
1999 po->auxdata = !!val;
2002 case PACKET_ORIGDEV:
2006 if (optlen < sizeof(val))
2008 if (copy_from_user(&val, optval, sizeof(val)))
2011 po->origdev = !!val;
2014 case PACKET_VNET_HDR:
2018 if (sock->type != SOCK_RAW)
2020 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2022 if (optlen < sizeof(val))
2024 if (copy_from_user(&val, optval, sizeof(val)))
2027 po->has_vnet_hdr = !!val;
2031 return -ENOPROTOOPT;
2035 static int packet_getsockopt(struct socket *sock, int level, int optname,
2036 char __user *optval, int __user *optlen)
2040 struct sock *sk = sock->sk;
2041 struct packet_sock *po = pkt_sk(sk);
2043 struct tpacket_stats st;
2045 if (level != SOL_PACKET)
2046 return -ENOPROTOOPT;
2048 if (get_user(len, optlen))
2055 case PACKET_STATISTICS:
2056 if (len > sizeof(struct tpacket_stats))
2057 len = sizeof(struct tpacket_stats);
2058 spin_lock_bh(&sk->sk_receive_queue.lock);
2060 memset(&po->stats, 0, sizeof(st));
2061 spin_unlock_bh(&sk->sk_receive_queue.lock);
2062 st.tp_packets += st.tp_drops;
2066 case PACKET_AUXDATA:
2067 if (len > sizeof(int))
2073 case PACKET_ORIGDEV:
2074 if (len > sizeof(int))
2080 case PACKET_VNET_HDR:
2081 if (len > sizeof(int))
2083 val = po->has_vnet_hdr;
2087 case PACKET_VERSION:
2088 if (len > sizeof(int))
2090 val = po->tp_version;
2094 if (len > sizeof(int))
2096 if (copy_from_user(&val, optval, len))
2100 val = sizeof(struct tpacket_hdr);
2103 val = sizeof(struct tpacket2_hdr);
2110 case PACKET_RESERVE:
2111 if (len > sizeof(unsigned int))
2112 len = sizeof(unsigned int);
2113 val = po->tp_reserve;
2117 if (len > sizeof(unsigned int))
2118 len = sizeof(unsigned int);
2123 return -ENOPROTOOPT;
2126 if (put_user(len, optlen))
2128 if (copy_to_user(optval, data, len))
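/*
 * Illustrative sketch (not part of this file): reading the counters
 * returned above. Note that PACKET_STATISTICS resets them on read, and
 * that tp_packets includes tp_drops, as computed in this function:
 *
 *	struct tpacket_stats st;
 *	socklen_t slen = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &slen);
 */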
2134 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
2137 struct hlist_node *node;
2138 struct net_device *dev = data;
2139 struct net *net = dev_net(dev);
2142 sk_for_each_rcu(sk, node, &net->packet.sklist) {
2143 struct packet_sock *po = pkt_sk(sk);
2146 case NETDEV_UNREGISTER:
2148 packet_dev_mclist(dev, po->mclist, -1);
2152 if (dev->ifindex == po->ifindex) {
2153 spin_lock(&po->bind_lock);
2155 __dev_remove_pack(&po->prot_hook);
2158 sk->sk_err = ENETDOWN;
2159 if (!sock_flag(sk, SOCK_DEAD))
2160 sk->sk_error_report(sk);
2162 if (msg == NETDEV_UNREGISTER) {
2164 po->prot_hook.dev = NULL;
2166 spin_unlock(&po->bind_lock);
2170 if (dev->ifindex == po->ifindex) {
2171 spin_lock(&po->bind_lock);
2172 if (po->num && !po->running) {
2173 dev_add_pack(&po->prot_hook);
2177 spin_unlock(&po->bind_lock);
2187 static int packet_ioctl(struct socket *sock, unsigned int cmd,
2190 struct sock *sk = sock->sk;
2195 int amount = sk_wmem_alloc_get(sk);
2197 return put_user(amount, (int __user *)arg);
2201 struct sk_buff *skb;
2204 spin_lock_bh(&sk->sk_receive_queue.lock);
2205 skb = skb_peek(&sk->sk_receive_queue);
2208 spin_unlock_bh(&sk->sk_receive_queue.lock);
2209 return put_user(amount, (int __user *)arg);
2212 return sock_get_timestamp(sk, (struct timeval __user *)arg);
2214 return sock_get_timestampns(sk, (struct timespec __user *)arg);
2224 case SIOCGIFBRDADDR:
2225 case SIOCSIFBRDADDR:
2226 case SIOCGIFNETMASK:
2227 case SIOCSIFNETMASK:
2228 case SIOCGIFDSTADDR:
2229 case SIOCSIFDSTADDR:
2231 return inet_dgram_ops.ioctl(sock, cmd, arg);
2235 return -ENOIOCTLCMD;
2240 static unsigned int packet_poll(struct file *file, struct socket *sock,
2243 struct sock *sk = sock->sk;
2244 struct packet_sock *po = pkt_sk(sk);
2245 unsigned int mask = datagram_poll(file, sock, wait);
2247 spin_lock_bh(&sk->sk_receive_queue.lock);
2248 if (po->rx_ring.pg_vec) {
2249 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
2250 mask |= POLLIN | POLLRDNORM;
2252 spin_unlock_bh(&sk->sk_receive_queue.lock);
2253 spin_lock_bh(&sk->sk_write_queue.lock);
2254 if (po->tx_ring.pg_vec) {
2255 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2256 mask |= POLLOUT | POLLWRNORM;
2258 spin_unlock_bh(&sk->sk_write_queue.lock);
2263 /* Dirty? Well, I still have not learned a better way to account for user mmaps. */
2267 static void packet_mm_open(struct vm_area_struct *vma)
2269 struct file *file = vma->vm_file;
2270 struct socket *sock = file->private_data;
2271 struct sock *sk = sock->sk;
2274 atomic_inc(&pkt_sk(sk)->mapped);
2277 static void packet_mm_close(struct vm_area_struct *vma)
2279 struct file *file = vma->vm_file;
2280 struct socket *sock = file->private_data;
2281 struct sock *sk = sock->sk;
2284 atomic_dec(&pkt_sk(sk)->mapped);
2287 static const struct vm_operations_struct packet_mmap_ops = {
2288 .open = packet_mm_open,
2289 .close = packet_mm_close,
2292 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
2296 for (i = 0; i < len; i++) {
2297 if (likely(pg_vec[i]))
2298 free_pages((unsigned long) pg_vec[i], order);
2303 static inline char *alloc_one_pg_vec_page(unsigned long order)
2305 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
2307 return (char *) __get_free_pages(gfp_flags, order);
2310 static char **alloc_pg_vec(struct tpacket_req *req, int order)
2312 unsigned int block_nr = req->tp_block_nr;
2316 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
2317 if (unlikely(!pg_vec))
2320 for (i = 0; i < block_nr; i++) {
2321 pg_vec[i] = alloc_one_pg_vec_page(order);
2322 if (unlikely(!pg_vec[i]))
2323 goto out_free_pgvec;
2330 free_pg_vec(pg_vec, order, block_nr);
2335 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2336 int closing, int tx_ring)
2338 char **pg_vec = NULL;
2339 struct packet_sock *po = pkt_sk(sk);
2340 int was_running, order = 0;
2341 struct packet_ring_buffer *rb;
2342 struct sk_buff_head *rb_queue;
2346 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
2347 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
2351 if (atomic_read(&po->mapped))
2353 if (atomic_read(&rb->pending))
2357 if (req->tp_block_nr) {
2358 /* Sanity tests and some calculations */
2360 if (unlikely(rb->pg_vec))
2363 switch (po->tp_version) {
2365 po->tp_hdrlen = TPACKET_HDRLEN;
2368 po->tp_hdrlen = TPACKET2_HDRLEN;
2373 if (unlikely((int)req->tp_block_size <= 0))
2375 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
2377 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
2380 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
2383 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
2384 if (unlikely(rb->frames_per_block <= 0))
2386 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
2391 order = get_order(req->tp_block_size);
2392 pg_vec = alloc_pg_vec(req, order);
2393 if (unlikely(!pg_vec))
2399 if (unlikely(req->tp_frame_nr))
2405 /* Detach socket from network */
2406 spin_lock(&po->bind_lock);
2407 was_running = po->running;
2410 __dev_remove_pack(&po->prot_hook);
2415 spin_unlock(&po->bind_lock);
2420 mutex_lock(&po->pg_vec_lock);
2421 if (closing || atomic_read(&po->mapped) == 0) {
2423 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
2424 spin_lock_bh(&rb_queue->lock);
2425 pg_vec = XC(rb->pg_vec, pg_vec);
2426 rb->frame_max = (req->tp_frame_nr - 1);
2428 rb->frame_size = req->tp_frame_size;
2429 spin_unlock_bh(&rb_queue->lock);
2431 order = XC(rb->pg_vec_order, order);
2432 req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
2434 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
2435 po->prot_hook.func = (po->rx_ring.pg_vec) ?
2436 tpacket_rcv : packet_rcv;
2437 skb_queue_purge(rb_queue);
2439 if (atomic_read(&po->mapped))
2440 pr_err("packet_mmap: vma is busy: %d\n",
2441 atomic_read(&po->mapped));
2443 mutex_unlock(&po->pg_vec_lock);
2445 spin_lock(&po->bind_lock);
2446 if (was_running && !po->running) {
2450 dev_add_pack(&po->prot_hook);
2452 spin_unlock(&po->bind_lock);
2457 free_pg_vec(pg_vec, order, req->tp_block_nr);
2462 static int packet_mmap(struct file *file, struct socket *sock,
2463 struct vm_area_struct *vma)
2465 struct sock *sk = sock->sk;
2466 struct packet_sock *po = pkt_sk(sk);
2467 unsigned long size, expected_size;
2468 struct packet_ring_buffer *rb;
2469 unsigned long start;
2476 mutex_lock(&po->pg_vec_lock);
2479 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2481 expected_size += rb->pg_vec_len
2487 if (expected_size == 0)
2490 size = vma->vm_end - vma->vm_start;
2491 if (size != expected_size)
2494 start = vma->vm_start;
2495 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2496 if (rb->pg_vec == NULL)
2499 for (i = 0; i < rb->pg_vec_len; i++) {
2500 struct page *page = virt_to_page(rb->pg_vec[i]);
2503 for (pg_num = 0; pg_num < rb->pg_vec_pages;
2505 err = vm_insert_page(vma, start, page);
2513 atomic_inc(&po->mapped);
2514 vma->vm_ops = &packet_mmap_ops;
2518 mutex_unlock(&po->pg_vec_lock);
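/*
 * Illustrative userspace sketch (not part of this file) of the rx ring
 * served by packet_set_ring() and packet_mmap() above; a minimal
 * sketch, assuming TPACKET_V1 and ignoring errors:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)ring;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		consume hdr->tp_snaplen bytes at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	hand the slot back
 *	}
 */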
2522 static const struct proto_ops packet_ops_spkt = {
2523 .family = PF_PACKET,
2524 .owner = THIS_MODULE,
2525 .release = packet_release,
2526 .bind = packet_bind_spkt,
2527 .connect = sock_no_connect,
2528 .socketpair = sock_no_socketpair,
2529 .accept = sock_no_accept,
2530 .getname = packet_getname_spkt,
2531 .poll = datagram_poll,
2532 .ioctl = packet_ioctl,
2533 .listen = sock_no_listen,
2534 .shutdown = sock_no_shutdown,
2535 .setsockopt = sock_no_setsockopt,
2536 .getsockopt = sock_no_getsockopt,
2537 .sendmsg = packet_sendmsg_spkt,
2538 .recvmsg = packet_recvmsg,
2539 .mmap = sock_no_mmap,
2540 .sendpage = sock_no_sendpage,
2543 static const struct proto_ops packet_ops = {
2544 .family = PF_PACKET,
2545 .owner = THIS_MODULE,
2546 .release = packet_release,
2547 .bind = packet_bind,
2548 .connect = sock_no_connect,
2549 .socketpair = sock_no_socketpair,
2550 .accept = sock_no_accept,
2551 .getname = packet_getname,
2552 .poll = packet_poll,
2553 .ioctl = packet_ioctl,
2554 .listen = sock_no_listen,
2555 .shutdown = sock_no_shutdown,
2556 .setsockopt = packet_setsockopt,
2557 .getsockopt = packet_getsockopt,
2558 .sendmsg = packet_sendmsg,
2559 .recvmsg = packet_recvmsg,
2560 .mmap = packet_mmap,
2561 .sendpage = sock_no_sendpage,
2564 static const struct net_proto_family packet_family_ops = {
2565 .family = PF_PACKET,
2566 .create = packet_create,
2567 .owner = THIS_MODULE,
2570 static struct notifier_block packet_netdev_notifier = {
2571 .notifier_call = packet_notifier,
2574 #ifdef CONFIG_PROC_FS
2576 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
2579 struct net *net = seq_file_net(seq);
2582 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
2585 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2587 struct net *net = seq_file_net(seq);
2588 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
2591 static void packet_seq_stop(struct seq_file *seq, void *v)
2597 static int packet_seq_show(struct seq_file *seq, void *v)
2599 if (v == SEQ_START_TOKEN)
2600 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
2602 struct sock *s = sk_entry(v);
2603 const struct packet_sock *po = pkt_sk(s);
2606 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
2608 atomic_read(&s->sk_refcnt),
2613 atomic_read(&s->sk_rmem_alloc),
2621 static const struct seq_operations packet_seq_ops = {
2622 .start = packet_seq_start,
2623 .next = packet_seq_next,
2624 .stop = packet_seq_stop,
2625 .show = packet_seq_show,
2628 static int packet_seq_open(struct inode *inode, struct file *file)
2630 return seq_open_net(inode, file, &packet_seq_ops,
2631 sizeof(struct seq_net_private));
2634 static const struct file_operations packet_seq_fops = {
2635 .owner = THIS_MODULE,
2636 .open = packet_seq_open,
2638 .llseek = seq_lseek,
2639 .release = seq_release_net,
2644 static int __net_init packet_net_init(struct net *net)
2646 spin_lock_init(&net->packet.sklist_lock);
2647 INIT_HLIST_HEAD(&net->packet.sklist);
2649 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
2655 static void __net_exit packet_net_exit(struct net *net)
2657 proc_net_remove(net, "packet");
2660 static struct pernet_operations packet_net_ops = {
2661 .init = packet_net_init,
2662 .exit = packet_net_exit,
2666 static void __exit packet_exit(void)
2668 unregister_netdevice_notifier(&packet_netdev_notifier);
2669 unregister_pernet_subsys(&packet_net_ops);
2670 sock_unregister(PF_PACKET);
2671 proto_unregister(&packet_proto);
2674 static int __init packet_init(void)
2676 int rc = proto_register(&packet_proto, 0);
2681 sock_register(&packet_family_ops);
2682 register_pernet_subsys(&packet_net_ops);
2683 register_netdevice_notifier(&packet_netdev_notifier);
2688 module_init(packet_init);
2689 module_exit(packet_exit);
2690 MODULE_LICENSE("GPL");
2691 MODULE_ALIAS_NETPROTO(PF_PACKET);