/* # of queue pairs currently used by the driver */
u16 curr_queue_pairs;
+ /* # of XDP queue pairs currently used by the driver */
+ u16 xdp_queue_pairs;
+
/* I like... big packets and I cannot lie! */
bool big_packets;
return skb;
}
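+/* Transmit an XDP_TX buffer on the given send queue. Buffers from earlier
+ * transmits that the device has finished with are reclaimed first.
+ */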
+static void virtnet_xdp_xmit(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct send_queue *sq,
+ struct xdp_buff *xdp)
+{
+ struct page *page = virt_to_head_page(xdp->data);
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+ unsigned int num_sg, len;
+ void *xdp_sent;
+ int err;
+
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ struct page *sent_page = virt_to_head_page(xdp_sent);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(sent_page);
+ else
+ give_pages(rq, sent_page);
+ }
+
+ /* Zero header and leave csum up to XDP layers */
+ hdr = xdp->data;
+ memset(hdr, 0, vi->hdr_len);
+
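+ /* One descriptor covers the zeroed header through the end of the packet data */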
+ num_sg = 1;
+ sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+ xdp->data, GFP_ATOMIC);
+ if (unlikely(err)) {
+ if (vi->mergeable_rx_bufs)
+ put_page(page);
+ else
+ give_pages(rq, page);
+ return; /* On error abort to avoid an unnecessary kick */
+ } else if (!vi->mergeable_rx_bufs) {
+ /* Without mergeable bufs these are big packets, so return the chained pages */
+ give_pages(rq, (struct page *)page->private);
+ page->private = 0;
+ }
+
+ virtqueue_kick(sq->vq);
+}
+
static u32 do_xdp_prog(struct virtnet_info *vi,
+ struct receive_queue *rq,
struct bpf_prog *xdp_prog,
struct page *page, int offset, int len)
{
int hdr_padded_len;
struct xdp_buff xdp;
+ unsigned int qp;
u32 act;
u8 *buf;
switch (act) {
case XDP_PASS:
return XDP_PASS;
+ case XDP_TX:
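+ /* Each CPU owns one of the dedicated XDP TX queues appended
+ * after the regular queue pairs, so no tx lock is needed.
+ */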
+ qp = vi->curr_queue_pairs -
+ vi->xdp_queue_pairs +
+ smp_processor_id();
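+ /* Rewind data to the virtio-net header location that
+ * virtnet_xdp_xmit() zeroes before queueing the buffer.
+ */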
+ xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
+ virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
+ return XDP_TX;
default:
bpf_warn_invalid_xdp_action(act);
- case XDP_TX:
case XDP_ABORTED:
case XDP_DROP:
return XDP_DROP;
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
goto err_xdp;
- act = do_xdp_prog(vi, xdp_prog, page, 0, len);
- if (act == XDP_DROP)
+ act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
goto err_xdp;
+ }
}
rcu_read_unlock();
err:
dev->stats.rx_dropped++;
give_pages(rq, page);
+xdp_xmit:
+ return NULL;
+}
+
+/* The conditions to enable XDP should preclude the underlying device from
+ * sending packets across multiple buffers (num_buf > 1). Per the spec this
+ * does not appear to be strictly illegal, just against convention. So,
+ * rather than risk making the system unresponsive, the packets are copied
+ * into a single page and the XDP program is run on that copy. This is
+ * extremely slow, so a warning is pushed to the user to fix it as soon as
+ * possible. Fixing it may mean determining why the underlying device is
+ * delivering multiple buffers, or simply loading the XDP program in the
+ * ingress stack after the skb is built, since running it here no longer
+ * has any advantage.
+ */
+static struct page *xdp_linearize_page(struct receive_queue *rq,
+ u16 num_buf,
+ struct page *p,
+ int offset,
+ unsigned int *len)
+{
+ struct page *page = alloc_page(GFP_ATOMIC);
+ unsigned int page_off = 0;
+
+ if (!page)
+ return NULL;
+
+ memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+ page_off += *len;
+
+ while (--num_buf) {
+ unsigned int buflen;
+ unsigned long ctx;
+ void *buf;
+ int off;
+
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
+ if (unlikely(!ctx))
+ goto err_buf;
+
+ /* guard against a misconfigured or uncooperative backend that
+ * is sending packets larger than the MTU.
+ */
+ if ((page_off + buflen) > PAGE_SIZE)
+ goto err_buf;
+
+ buf = mergeable_ctx_to_buf_address(ctx);
+ p = virt_to_head_page(buf);
+ off = buf - page_address(p);
+
+ memcpy(page_address(page) + page_off,
+ page_address(p) + off, buflen);
+ page_off += buflen;
+ }
+
+ *len = page_off;
+ return page;
+err_buf:
+ __free_pages(page, 0);
return NULL;
}
struct bpf_prog *xdp_prog;
unsigned int truesize;
+ head_skb = NULL;
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
+ struct page *xdp_page;
u32 act;
/* No known backend devices should send packets with
*/
if (unlikely(num_buf > 1)) {
bpf_warn_invalid_xdp_buffer();
- goto err_xdp;
+
+ /* linearize data for XDP */
+ xdp_page = xdp_linearize_page(rq, num_buf,
+ page, offset, &len);
+ if (!xdp_page)
+ goto err_xdp;
+ offset = 0;
+ } else {
+ xdp_page = page;
}
/* Transient failure which in theory could occur if
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
goto err_xdp;
- act = do_xdp_prog(vi, xdp_prog, page, offset, len);
- if (act == XDP_DROP)
+ act = do_xdp_prog(vi, rq, xdp_prog, page, offset, len);
+ switch (act) {
+ case XDP_PASS:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+ break;
+ case XDP_TX:
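+ /* XDP_TX from a linearized copy is not supported; drop it */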
+ if (unlikely(xdp_page != page))
+ goto err_xdp;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
goto err_xdp;
+ }
}
rcu_read_unlock();
err_buf:
dev->stats.rx_dropped++;
dev_kfree_skb(head_skb);
+xdp_xmit:
return NULL;
}
unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
struct virtnet_info *vi = netdev_priv(dev);
struct bpf_prog *old_prog;
- int i;
+ u16 xdp_qp = 0, curr_qp;
+ int i, err;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6)) {
return -EINVAL;
}
+ curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
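+ /* Reserve one XDP_TX queue per CPU so transmits need no locking */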
+ if (prog)
+ xdp_qp = nr_cpu_ids;
+
+ /* XDP requires extra queues for XDP_TX */
+ if (curr_qp + xdp_qp > vi->max_queue_pairs) {
+ netdev_warn(dev, "requested %i queues but max is %i\n",
+ curr_qp + xdp_qp, vi->max_queue_pairs);
+ return -ENOMEM;
+ }
+
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
+ if (err) {
+ dev_warn(&dev->dev, "XDP device queue allocation failure.\n");
+ return err;
+ }
+
if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
- if (IS_ERR(prog))
+ if (IS_ERR(prog)) {
+ virtnet_set_queues(vi, curr_qp);
return PTR_ERR(prog);
+ }
}
+ vi->xdp_queue_pairs = xdp_qp;
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+
for (i = 0; i < vi->max_queue_pairs; i++) {
old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
put_page(vi->rq[i].alloc_frag.page);
}
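+/* XDP TX queues occupy the tail of the active range:
+ * [curr_queue_pairs - xdp_queue_pairs, curr_queue_pairs)
+ */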
+static bool is_xdp_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- dev_kfree_skb(buf);
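+ /* Buffers on XDP TX queues are raw pages, not skbs */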
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+ if (!is_xdp_queue(vi, i))
+ dev_kfree_skb(buf);
+ else
+ put_page(virt_to_head_page(buf));
+ }
}
for (i = 0; i < vi->max_queue_pairs; i++) {