/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_SEND_COMMAND_SG_MAX    2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
        struct u64_stats_sync syncp;
        u64 tx_bytes;
        u64 tx_packets;

        u64 rx_bytes;
        u64 rx_packets;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Active statistics */
        struct virtnet_stats __percpu *stats;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* fragments + linear part + virtio header */
        struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
        struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr hdr;
        /*
         * The virtio_net_hdr should be in a separate sg buffer because of a
         * QEMU bug, and the data sg buffer shares the same page as this
         * header sg.  This padding makes the next sg 16-byte aligned after
         * the virtio_net_hdr.
         */
        char padding[6];
};
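
/*
 * Resulting layout of the first receive page in the big-packets path
 * (virtio_net_hdr is 10 bytes):
 *
 *   bytes  0..9    virtio_net_hdr  -> rx_sg[0]
 *   bytes 10..15   padding
 *   bytes 16..     packet data     -> rx_sg[1] (16-byte aligned)
 */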

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p) {
                vi->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}
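
/*
 * The per-device page pool is a singly linked list threaded through
 * page->private:
 *
 *   vi->pages -> pageA -> pageB -> ... -> NULL
 *   (pageA->private == (unsigned long)pageB, and so on)
 *
 * give_pages() splices a whole chain onto the front; get_a_page() pops
 * the head, falling back to alloc_page() when the pool is empty.
 */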

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
                         unsigned int offset, unsigned int *len)
{
        int size = min((unsigned)PAGE_SIZE - offset, *len);
        int i = skb_shinfo(skb)->nr_frags;

        __skb_fill_page_desc(skb, i, page, offset, size);

        skb->data_len += size;
        skb->len += size;
        skb->truesize += PAGE_SIZE;
        skb_shinfo(skb)->nr_frags++;
        *len -= size;
}

static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct page *page, unsigned int len)
{
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        unsigned int copy, hdr_len, offset;
        char *p;

        p = page_address(page);

        /* copy small packet so we can reuse these pages for small data */
        skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        hdr = skb_vnet_hdr(skb);

        if (vi->mergeable_rx_bufs) {
                hdr_len = sizeof hdr->mhdr;
                offset = hdr_len;
        } else {
                hdr_len = sizeof hdr->hdr;
                offset = sizeof(struct padded_vnet_hdr);
        }

        memcpy(hdr, p, hdr_len);

        len -= hdr_len;
        p += offset;

        copy = len;
        if (copy > skb_tailroom(skb))
                copy = skb_tailroom(skb);
        memcpy(skb_put(skb, copy), p, copy);

        len -= copy;
        offset += copy;

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                if (net_ratelimit())
                        pr_debug("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }

        while (len) {
                set_skb_frag(skb, page, offset, &len);
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(vi, page);

        return skb;
}
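
/*
 * In short: page_to_skb() copies the device-written header plus up to
 * GOOD_COPY_LEN bytes of payload into the skb's linear area, attaches
 * whatever remains zero-copy as page fragments, and returns any unused
 * pages in the chain to the pool via give_pages().
 */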

static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        struct page *page;
        int num_buf, i, len;

        num_buf = hdr->mhdr.num_buffers;
        while (--num_buf) {
                i = skb_shinfo(skb)->nr_frags;
                if (i >= MAX_SKB_FRAGS) {
                        pr_debug("%s: packet too long\n", skb->dev->name);
                        skb->dev->stats.rx_length_errors++;
                        return -EINVAL;
                }
                page = virtqueue_get_buf(vi->rvq, &len);
                if (!page) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 skb->dev->name, hdr->mhdr.num_buffers);
                        skb->dev->stats.rx_length_errors++;
                        return -EINVAL;
                }

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;

                set_skb_frag(skb, page, 0, &len);

                --vi->num;
        }
        return 0;
}
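
/*
 * With VIRTIO_NET_F_MRG_RXBUF the host may spread one packet across
 * several page-sized receive buffers; mhdr.num_buffers in the first
 * buffer says how many.  Each additional buffer is pulled off the ring
 * and appended as one skb fragment, capped at PAGE_SIZE.
 */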

static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
        struct sk_buff *skb;
        struct page *page;
        struct skb_vnet_hdr *hdr;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs || vi->big_packets)
                        give_pages(vi, buf);
                else
                        dev_kfree_skb(buf);
                return;
        }

        if (!vi->mergeable_rx_bufs && !vi->big_packets) {
                skb = buf;
                len -= sizeof(struct virtio_net_hdr);
                skb_trim(skb, len);
        } else {
                page = buf;
                skb = page_to_skb(vi, page, len);
                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
                        give_pages(vi, page);
                        return;
                }
                if (vi->mergeable_rx_bufs)
                        if (receive_mergeable(vi, skb)) {
                                dev_kfree_skb(skb);
                                return;
                        }
        }

        hdr = skb_vnet_hdr(skb);

        u64_stats_update_begin(&stats->syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
        u64_stats_update_end(&stats->syncp);

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        int err;

        skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
        if (unlikely(!skb))
                return -ENOMEM;

        skb_put(skb, MAX_PACKET_LEN);

        hdr = skb_vnet_hdr(skb);
        sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

        skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

        err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
        if (err < 0)
                dev_kfree_skb(skb);

        return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(vi, gfp);
                if (!first) {
                        if (list)
                                give_pages(vi, list);
                        return -ENOMEM;
                }
                sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(vi, gfp);
        if (!first) {
                give_pages(vi, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
        /* a separate vi->rx_sg[0] for the virtio_net_hdr only, due to a QEMU bug */
        sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

        /* vi->rx_sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
                                    first, gfp);
        if (err < 0)
                give_pages(vi, first);

        return err;
}
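
/*
 * The scatterlist a big-packets receive buffer hands to the device:
 *
 *   rx_sg[0]                    first page, bytes 0..9  (virtio_net_hdr)
 *   rx_sg[1]                    first page, bytes 16..  (start of data)
 *   rx_sg[2..MAX_SKB_FRAGS+1]   one full page each
 *
 * The pages are chained head-first through page->private, so the whole
 * buffer can be tracked (and later freed) by its first page alone.
 */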

static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
        struct page *page;
        int err;

        page = get_a_page(vi, gfp);
        if (!page)
                return -ENOMEM;

        sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

        err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
        if (err < 0)
                give_pages(vi, page);

        return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(vi, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, gfp);
                else
                        err = add_recvbuf_small(vi, gfp);

                oom = err == -ENOMEM;
                if (err < 0)
                        break;
                ++vi->num;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        virtqueue_kick(vi->rvq);
        return !oom;
}
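
/*
 * Note on the loop condition above: on success virtqueue_add_buf_gfp()
 * returns the ring capacity still remaining, so the loop keeps adding
 * buffers until the ring is exactly full (err == 0) or adding fails.
 * Only -ENOMEM counts as OOM; the caller then schedules refill_work to
 * retry from process context.
 */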

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                virtqueue_disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
        napi_enable(&vi->napi);

        /* If all buffers were filled by the other side before we napi_enabled,
         * we won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
        if (napi_schedule_prep(&vi->napi)) {
                virtqueue_disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        still_empty = !try_fill_recv(vi, GFP_KERNEL);
        virtnet_napi_enable(vi);

        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
                queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        void *buf;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
                receive_buf(vi->dev, buf, len);
                --vi->num;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        queue_delayed_work(system_nrt_wq, &vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
                    napi_schedule_prep(napi)) {
                        virtqueue_disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}
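
/*
 * The dance at the end of virtnet_poll() closes a race: after
 * napi_complete(), virtqueue_enable_cb() returns false if more buffers
 * were already pending when callbacks were re-enabled.  No interrupt
 * will fire for those, so polling is restarted by hand instead of
 * waiting for one.
 */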

static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;
        struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);

        while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);

                u64_stats_update_begin(&stats->syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
                u64_stats_update_end(&stats->syncp);

                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
        return tot_sgs;
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb_checksum_start_offset(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
        else
                sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

        hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
        return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
                                        0, skb);
}
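
/*
 * Transmit scatterlist layout: tx_sg[0] carries the virtio header that
 * lives in skb->cb, and tx_sg[1..] cover the skb's linear data plus its
 * page fragments (filled in by skb_to_sgvec()).  num_sg is stashed in
 * the header so free_old_xmit_skbs() can report how many descriptors a
 * completed skb gives back.
 */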

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                if (net_ratelimit()) {
                        if (likely(capacity == -ENOMEM)) {
                                dev_warn(&dev->dev,
                                         "TX queue failure: out of memory\n");
                        } else {
                                dev->stats.tx_fifo_errors++;
                                dev_warn(&dev->dev,
                                         "Unexpected TX queue failure: %d\n",
                                         capacity);
                        }
                }
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        virtqueue_kick(vi->svq);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                virtqueue_disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}
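
/*
 * Why stop at capacity < 2 + MAX_SKB_FRAGS: the next packet may need up
 * to MAX_SKB_FRAGS fragment descriptors plus one for the linear part and
 * one for the virtio header.  Stopping the queue while that worst case
 * still fits avoids ever having to return NETDEV_TX_BUSY, at the cost of
 * leaving a few ring entries unused.
 */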

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                                               struct rtnl_link_stats64 *tot)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int cpu;
        unsigned int start;

        for_each_possible_cpu(cpu) {
                struct virtnet_stats __percpu *stats
                        = per_cpu_ptr(vi->stats, cpu);
                u64 tpackets, tbytes, rpackets, rbytes;

                do {
                        start = u64_stats_fetch_begin(&stats->syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
                } while (u64_stats_fetch_retry(&stats->syncp, start));

                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
                tot->rx_bytes   += rbytes;
                tot->tx_bytes   += tbytes;
        }

        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;

        return tot;
}
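
/*
 * The fetch_begin/fetch_retry pair reads each per-cpu counter set under
 * a seqcount: if the writer side (u64_stats_update_begin/end in the rx
 * and tx paths) ran in between, the sequence numbers differ and the
 * read is retried.  This keeps the 64-bit counters tear-free on 32-bit
 * SMP without taking a lock in the hot paths.
 */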

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        /* Make sure we have some buffers: if oom use wq. */
        if (!try_fill_recv(vi, GFP_KERNEL))
                queue_delayed_work(system_nrt_wq, &vi->refill, 0);

        virtnet_napi_enable(vi);
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
                (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);

        virtqueue_kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!virtqueue_get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}
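
/*
 * Control-queue buffer layout, as built above (after out/in are bumped
 * to cover the header and the ack):
 *
 *   sg[0]              ctrl header {class, cmd}   (device-readable)
 *   sg[1..out-1]       caller's outgoing data     (device-readable)
 *   sg[out..out+in-2]  caller's incoming data     (device-writable)
 *   sg[out+in-1]       one-byte ack status        (device-writable)
 *
 * The first 'out' entries are readable by the device and the last 'in'
 * writable by it, matching the virtqueue_add_buf(out, in) split.
 */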

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
        napi_disable(&vi->napi);

        return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct netdev_hw_addr *ha;
        int uc_count;
        int mc_count;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
        /* MAC filter - use one buffer for both lists */
        buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
                      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        mac_data = buf;
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = uc_count;
        i = 0;
        netdev_for_each_uc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[uc_count][0];

        mac_data->entries = mc_count;
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}
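
/*
 * Layout of the single allocation sent as the MAC filter table: two
 * struct virtio_net_ctrl_mac tables packed back to back, each a count
 * followed by that many 6-byte addresses:
 *
 *   sg[0]: { entries = uc_count; macs[uc_count] }
 *   sg[1]: { entries = mc_count; macs[mc_count] }
 */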

static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
        return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
        return 0;
}

static void virtnet_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
{
        struct virtnet_info *vi = netdev_priv(dev);

        ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
        ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
        ring->rx_pending = ring->rx_max_pending;
        ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = virtnet_get_ringparam,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
                              offsetof(struct virtio_net_config, status),
                              &v) < 0)
                return;

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->priv_flags |= IFF_UNICAST_FLT;
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;

        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (csum)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->hw_features |= NETIF_F_TSO;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->hw_features |= NETIF_F_UFO;

                if (gso)
                        dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
                /* (!csum && gso) case will be fixed by register_netdev() */
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len) < 0)
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        vi->stats = alloc_percpu(struct virtnet_stats);
        err = -ENOMEM;
        if (vi->stats == NULL)
                goto free;

        INIT_DELAYED_WORK(&vi->refill, refill_work);
        sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
        sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free_stats;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        /* If the device can't report link status, assume it's always up;
         * otherwise get the initial link status from config. */
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                netif_carrier_off(dev);
                virtnet_update_status(vi);
        } else {
                vi->status = VIRTIO_NET_S_LINK_UP;
                netif_carrier_on(dev);
        }

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
free_vqs:
        vdev->config->del_vqs(vdev);
free_stats:
        free_percpu(vi->stats);
free:
        free_netdev(dev);
        return err;
}

static void free_unused_bufs(struct virtnet_info *vi)
{
        void *buf;
        while (1) {
                buf = virtqueue_detach_unused_buf(vi->svq);
                if (!buf)
                        break;
                dev_kfree_skb(buf);
        }
        while (1) {
                buf = virtqueue_detach_unused_buf(vi->rvq);
                if (!buf)
                        break;
                if (vi->mergeable_rx_bufs || vi->big_packets)
                        give_pages(vi, buf);
                else
                        dev_kfree_skb(buf);
                --vi->num;
        }
        BUG_ON(vi->num != 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        unregister_netdev(vi->dev);

        /* Free unused buffers in both send and recv, if any. */
        free_unused_bufs(vi);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_percpu(vi->stats);
        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");