#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};
static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};
/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
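
/*
 * Fetch the macvlan_dev that was stored as the lower device's
 * rx_handler_data (set up in macvtap_newlink); callers must be inside
 * an rcu read-side critical section.
 */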
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		goto out;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);

	q->queue_index = vlan->numvtaps;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	vlan = rtnl_dereference(q->vlan);
	if (vlan) {
		int index = q->queue_index;

		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * back to the queue after it has been destroyed.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	vlan = rtnl_dereference(q->vlan);
	BUG_ON(macvtap_disable_queue(q));
	RCU_INIT_POINTER(q->vlan, NULL);
	list_del_init(&q->next);
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not multiqueue, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to look up a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	tap = rcu_dereference(vlan->taps[rxq % numvtaps]);

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);

	tap = rcu_dereference(vlan->taps[0]);
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);

		RCU_INIT_POINTER(q->vlan, NULL);

	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}
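
/*
 * rx handler of the macvtap device: pick a destination queue, segment GSO
 * skbs that the queue cannot accept as-is, and append the result to the
 * queue's socket receive queue for read()/recvmsg().
 */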
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
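
/*
 * Minor number management: each macvtap link gets a minor allocated from
 * minor_idr so that its character device node can be created; minor_lock
 * serializes allocation, lookup and release.
 */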
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}
static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * the tap device support all offloads as well.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}
static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind = "macvtap",
	.setup = macvtap_setup,
	.newlink = macvtap_newlink,
	.dellink = macvtap_dellink,
};
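
/*
 * Called when room becomes available on the socket's send side; wake up
 * anyone polling for POLLOUT.
 */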
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
	struct macvtap_queue *q;
	int err;

	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM's virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports it.
	 *
	 * Macvlan supports zerocopy iff the lower device supports zero copy,
	 * so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;

	macvtap_put_queue(q);
	return 0;
}
static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
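
/*
 * Allocate an skb for a packet coming from user space: the first 'linear'
 * bytes go into the linear area, the remainder of 'len' is left for paged
 * data.
 */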
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	return 0;
}
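
/*
 * Fill a virtio_net_hdr with the GSO type/size and checksum state of an
 * outgoing skb so that user space can reconstruct the offload information.
 */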
static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;

		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	size_t copylen = 0;
	size_t linear;
	bool zerocopy = false;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		if (len < vnet_hdr_len)
			goto err;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;

		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	if (unlikely(len < ETH_HLEN))
		goto err;

	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (vnet_hdr.hdr_len > good_linear)
			linear = good_linear;
		else
			linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
	if (!err && m && m->msg_control) {
		struct ubuf_info *uarg = m->msg_control;
		uarg->callback(uarg, false);
	}

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);

	skb_probe_transport_header(skb, ETH_HLEN);

	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}

	return total_len;

err:
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int copied, total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = q->vnet_hdr_sz;
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
			return -EFAULT;
	}
	total = copied = vnet_hdr_len;

	if (!vlan_tx_tag_present(skb))
		len = min_t(int, skb->len, len);

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		len = min_t(int, skb->len + VLAN_HLEN, len);

		copy = min_t(int, vlan_offset, len);
		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);

		copy = min_t(int, sizeof(veth), len);
		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);

	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);

	return ret ? ret : total;
}
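
/*
 * Blocking read helper: wait on the socket's receive queue until a frame
 * arrives (or the caller is interrupted / O_NONBLOCK is set), then copy it
 * to user space with macvtap_put_user().
 */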
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
{
		prepare_to_wait(sk_sleep(&q->sk), &wait,
				TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);

		if (signal_pending(current)) {

		/* Nothing to read, let's sleep */

	ret = macvtap_put_user(q, skb, iv, len);

	finish_wait(sk_sleep(&q->sk), &wait);
static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);

	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);
	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}
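
/*
 * TUNSETOFFLOAD helper: translate the TUN_F_* offload bits requested by
 * user space into netdev feature bits for the macvtap device.
 */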
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* The tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that user space wants to accept TSO
	 * frames, and turning it off means that user space does not
	 * support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}
/*
 * Provide compatibility with the generic tun/tap interface.
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		vlan = macvtap_get_vlan(q);

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		ret = macvtap_ioctl_set_queue(file, u);
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		ret = set_offload(q, arg);
		return ret;
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations macvtap_fops = {
	.owner = THIS_MODULE,
	.open = macvtap_open,
	.release = macvtap_release,
	.aio_read = macvtap_aio_read,
	.aio_write = macvtap_aio_write,
	.poll = macvtap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = macvtap_compat_ioctl,
#endif
};
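
/*
 * sendmsg/recvmsg implement the in-kernel socket interface on top of a
 * queue; this is what users of macvtap_get_socket() (e.g. vhost-net) use
 * to move packets without going through read/write on the file.
 */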
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);

	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
				m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, m->msg_iov, total_len,
			      flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};
/* Get an underlying socket object from a macvtap file.  Returns error unless
 * file is attached to a device.  The returned object works like a packet
 * socket, it can be used for sock_sendmsg/sock_recvmsg.  The caller is
 * responsible for holding a reference to the file for as long as the socket
 * is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;

	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
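
/*
 * Netdevice notifier: create the character device node ("tapN", N being the
 * ifindex of the link) when a macvtap link registers, and tear it down again
 * when the link goes away.
 */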
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call = macvtap_device_event,
};
static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);

	err = register_netdevice_notifier(&macvtap_notifier_block);

	err = macvlan_link_register(&macvtap_link_ops);

	unregister_netdevice_notifier(&macvtap_notifier_block);

	class_unregister(macvtap_class);

	cdev_del(&macvtap_cdev);

	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);
MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");
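
/*
 * Illustrative, untested user-space sketch (not part of the driver; the
 * device name and index below are placeholders): a macvtap link whose
 * ifindex is 5 is reachable through the character node created for it,
 * typically /dev/tap5.  Since the queue defaults to IFF_VNET_HDR, each
 * read() returns a struct virtio_net_hdr followed by the ethernet frame.
 *
 *	int fd = open("/dev/tap5", O_RDWR);
 *	int hdrsz;
 *	ioctl(fd, TUNGETVNETHDRSZ, &hdrsz);	// sizeof(struct virtio_net_hdr) by default
 *	char buf[65536];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// vnet header + frame
 */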