]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/net/virtio_net.c
net: fddi: skfp: use %p format specifier for addresses rather than %x
[karo-tx-linux.git] / drivers / net / virtio_net.c
index 1d16862c571061ea14fe74331a9a1b997fe92ceb..08327e005cccf27fc18db64d234aaefd0dcd1a1f 100644 (file)
@@ -330,12 +330,58 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        return skb;
 }
 
+static void virtnet_xdp_xmit(struct virtnet_info *vi,
+                            struct receive_queue *rq,
+                            struct send_queue *sq,
+                            struct xdp_buff *xdp)
+{
+       struct page *page = virt_to_head_page(xdp->data);
+       struct virtio_net_hdr_mrg_rxbuf *hdr;
+       unsigned int num_sg, len;
+       void *xdp_sent;
+       int err;
+
+       /* Free up any pending old buffers before queueing new ones. */
+       while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+               struct page *sent_page = virt_to_head_page(xdp_sent);
+
+               if (vi->mergeable_rx_bufs)
+                       put_page(sent_page);
+               else
+                       give_pages(rq, sent_page);
+       }
+
+       /* Zero header and leave csum up to XDP layers */
+       hdr = xdp->data;
+       memset(hdr, 0, vi->hdr_len);
+
+       num_sg = 1;
+       sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+       err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+                                  xdp->data, GFP_ATOMIC);
+       if (unlikely(err)) {
+               if (vi->mergeable_rx_bufs)
+                       put_page(page);
+               else
+                       give_pages(rq, page);
+               return; // On error abort to avoid unnecessary kick
+       } else if (!vi->mergeable_rx_bufs) {
+               /* If not mergeable bufs must be big packets so cleanup pages */
+               give_pages(rq, (struct page *)page->private);
+               page->private = 0;
+       }
+
+       virtqueue_kick(sq->vq);
+}
+
 static u32 do_xdp_prog(struct virtnet_info *vi,
+                      struct receive_queue *rq,
                       struct bpf_prog *xdp_prog,
                       struct page *page, int offset, int len)
 {
        int hdr_padded_len;
        struct xdp_buff xdp;
+       unsigned int qp;
        u32 act;
        u8 *buf;
 
@@ -353,9 +399,15 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
        switch (act) {
        case XDP_PASS:
                return XDP_PASS;
+       case XDP_TX:
+               qp = vi->curr_queue_pairs -
+                       vi->xdp_queue_pairs +
+                       smp_processor_id();
+               xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
+               virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
+               return XDP_TX;
        default:
                bpf_warn_invalid_xdp_action(act);
-       case XDP_TX:
        case XDP_ABORTED:
        case XDP_DROP:
                return XDP_DROP;
@@ -390,9 +442,17 @@ static struct sk_buff *receive_big(struct net_device *dev,
 
                if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
                        goto err_xdp;
-               act = do_xdp_prog(vi, xdp_prog, page, 0, len);
-               if (act == XDP_DROP)
+               act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
+               switch (act) {
+               case XDP_PASS:
+                       break;
+               case XDP_TX:
+                       rcu_read_unlock();
+                       goto xdp_xmit;
+               case XDP_DROP:
+               default:
                        goto err_xdp;
+               }
        }
        rcu_read_unlock();
 
@@ -407,6 +467,65 @@ err_xdp:
 err:
        dev->stats.rx_dropped++;
        give_pages(rq, page);
+xdp_xmit:
+       return NULL;
+}
+
+/* The conditions to enable XDP should preclude the underlying device from
+ * sending packets across multiple buffers (num_buf > 1). However per spec
+ * it does not appear to be illegal to do so but rather just against convention.
+ * So in order to avoid making a system unresponsive the packets are pushed
+ * into a page and the XDP program is run. This will be extremely slow and we
+ * push a warning to the user to fix this as soon as possible. Fixing this may
+ * require resolving the underlying hardware to determine why multiple buffers
+ * are being received or simply loading the XDP program in the ingress stack
+ * after the skb is built because there is no advantage to running it here
+ * anymore.
+ */
+static struct page *xdp_linearize_page(struct receive_queue *rq,
+                                      u16 num_buf,
+                                      struct page *p,
+                                      int offset,
+                                      unsigned int *len)
+{
+       struct page *page = alloc_page(GFP_ATOMIC);
+       unsigned int page_off = 0;
+
+       if (!page)
+               return NULL;
+
+       memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+       page_off += *len;
+
+       while (--num_buf) {
+               unsigned int buflen;
+               unsigned long ctx;
+               void *buf;
+               int off;
+
+               ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
+               if (unlikely(!ctx))
+                       goto err_buf;
+
+               /* guard against a misconfigured or uncooperative backend that
+                * is sending packet larger than the MTU.
+                */
+               if ((page_off + buflen) > PAGE_SIZE)
+                       goto err_buf;
+
+               buf = mergeable_ctx_to_buf_address(ctx);
+               p = virt_to_head_page(buf);
+               off = buf - page_address(p);
+
+               memcpy(page_address(page) + page_off,
+                      page_address(p) + off, buflen);
+               page_off += buflen;
+       }
+
+       *len = page_off;
+       return page;
+err_buf:
+       __free_pages(page, 0);
        return NULL;
 }
 
@@ -425,9 +544,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        struct bpf_prog *xdp_prog;
        unsigned int truesize;
 
+       head_skb = NULL;
+
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
+               struct page *xdp_page;
                u32 act;
 
                /* No known backend devices should send packets with
@@ -437,7 +559,15 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                 */
                if (unlikely(num_buf > 1)) {
                        bpf_warn_invalid_xdp_buffer();
-                       goto err_xdp;
+
+                       /* linearize data for XDP */
+                       xdp_page = xdp_linearize_page(rq, num_buf,
+                                                     page, offset, &len);
+                       if (!xdp_page)
+                               goto err_xdp;
+                       offset = 0;
+               } else {
+                       xdp_page = page;
                }
 
                /* Transient failure which in theory could occur if
@@ -448,9 +578,23 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
                        goto err_xdp;
 
-               act = do_xdp_prog(vi, xdp_prog, page, offset, len);
-               if (act == XDP_DROP)
+               act = do_xdp_prog(vi, rq, xdp_prog, page, offset, len);
+               switch (act) {
+               case XDP_PASS:
+                       if (unlikely(xdp_page != page))
+                               __free_pages(xdp_page, 0);
+                       break;
+               case XDP_TX:
+                       if (unlikely(xdp_page != page))
+                               goto err_xdp;
+                       rcu_read_unlock();
+                       goto xdp_xmit;
+               case XDP_DROP:
+               default:
+                       if (unlikely(xdp_page != page))
+                               __free_pages(xdp_page, 0);
                        goto err_xdp;
+               }
        }
        rcu_read_unlock();
 
@@ -528,6 +672,7 @@ err_skb:
 err_buf:
        dev->stats.rx_dropped++;
        dev_kfree_skb(head_skb);
+xdp_xmit:
        return NULL;
 }
 
@@ -1713,6 +1858,16 @@ static void free_receive_page_frags(struct virtnet_info *vi)
                        put_page(vi->rq[i].alloc_frag.page);
 }
 
+static bool is_xdp_queue(struct virtnet_info *vi, int q)
+{
+       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+               return false;
+       else if (q < vi->curr_queue_pairs)
+               return true;
+       else
+               return false;
+}
+
 static void free_unused_bufs(struct virtnet_info *vi)
 {
        void *buf;
@@ -1720,8 +1875,12 @@ static void free_unused_bufs(struct virtnet_info *vi)
 
        for (i = 0; i < vi->max_queue_pairs; i++) {
                struct virtqueue *vq = vi->sq[i].vq;
-               while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
-                       dev_kfree_skb(buf);
+               while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+                       if (!is_xdp_queue(vi, i))
+                               dev_kfree_skb(buf);
+                       else
+                               put_page(virt_to_head_page(buf));
+               }
        }
 
        for (i = 0; i < vi->max_queue_pairs; i++) {