IB/ipoib: Add handling for sending of skb with many frags
author     Hans Westgaard Ry <hans.westgaard.ry@oracle.com>
Wed, 2 Mar 2016 12:44:28 +0000 (13:44 +0100)
committer  Doug Ledford <dledford@redhat.com>
Thu, 3 Mar 2016 14:49:44 +0000 (09:49 -0500)
IPoIB converts skb fragments to sges, adding one extra sge when SG is
enabled.  The current codepath assumes that the maximum number of sges
a device supports is at least MAX_SKB_FRAGS + 1; there is no
interaction with the upper layers to limit the number of fragments in
an skb when a device supports fewer sges.  The same assumption also
leads to requesting a fixed number of sges when IPoIB creates
queue pairs with SG enabled.

A fallback/slowpath is implemented using skb_linearize() to handle the
cases where the conversion would result in more sges than the device
supports.

Signed-off-by: Hans Westgaard Ry <hans.westgaard.ry@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Wei Lin Guay <wei.lin.guay@oracle.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
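
The arithmetic behind the fallback is worth spelling out before the diff.
The sketch below is an editorial illustration, not part of the patch:
fits_in_send_sges() is a hypothetical helper, and max_send_sge stands for
the value the QP was actually created with (attr.cap.max_send_sge in the
patch).  It shows why one sge is reserved for the linear head and why the
fragment count is re-checked after skb_linearize().

/*
 * Editorial sketch, not part of the patch: the per-skb sge budget and
 * the linearize fallback that both send paths below open-code.
 */
#include <linux/skbuff.h>

static bool fits_in_send_sges(struct sk_buff *skb, unsigned int max_send_sge)
{
	/* The linear head, if non-empty, consumes one sge of its own;
	 * e.g. max_send_sge = 16 with head data leaves 15 sges for the
	 * page fragments, while MAX_SKB_FRAGS is 17 with 4 KiB pages.
	 */
	unsigned int usable_sge = max_send_sge - !!skb_headlen(skb);

	if (skb_shinfo(skb)->nr_frags <= usable_sge)
		return true;

	/* Slowpath: pull the fragments into the linear area ... */
	if (skb_linearize(skb) < 0)
		return false;

	/* ... and re-check, mirroring the patch's defensive
	 * "Does skb_linearize return ok without reducing nr_frags?" test.
	 */
	return skb_shinfo(skb)->nr_frags <= usable_sge;
}

When this check fails, the caller drops the skb and bumps tx_dropped and
tx_errors, exactly as ipoib_send() and ipoib_cm_send() do in the hunks
below.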

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a6f3eab0f350ef036cfb44a47fbbafe5f9bd1fc1..85be0de3ab2635af080000136a04756073825a09 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -244,6 +244,7 @@ struct ipoib_cm_tx {
        unsigned             tx_tail;
        unsigned long        flags;
        u32                  mtu;
+       unsigned             max_send_sge;
 };
 
 struct ipoib_cm_rx_buf {
@@ -390,6 +391,7 @@ struct ipoib_dev_priv {
        int     hca_caps;
        struct ipoib_ethtool_st ethtool;
        struct timer_list poll_timer;
+       unsigned max_send_sge;
 };
 
 struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 917e46ea3bf681681a4abba5e9ce6e86514d07eb..c8ed53562c9b54cfc3fd21ece14ea6b95f94c872 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -710,6 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int rc;
+       unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
 
        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }
-
+       if (skb_shinfo(skb)->nr_frags > usable_sge) {
+               if (skb_linearize(skb) < 0) {
+                       ipoib_warn(priv, "skb could not be linearized\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+               /* Does skb_linearize return ok without reducing nr_frags? */
+               if (skb_shinfo(skb)->nr_frags > usable_sge) {
+                       ipoib_warn(priv, "too many frags after skb linearize\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+       }
        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);
 
@@ -1031,7 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
        struct ib_qp *tx_qp;
 
        if (dev->features & NETIF_F_SG)
-               attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+               attr.cap.max_send_sge =
+                       min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
 
        tx_qp = ib_create_qp(priv->pd, &attr);
        if (PTR_ERR(tx_qp) == -EINVAL) {
@@ -1040,6 +1058,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
                attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
                tx_qp = ib_create_qp(priv->pd, &attr);
        }
+       tx->max_send_sge = attr.cap.max_send_sge;
        return tx_qp;
 }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fa9c42ff1fb00963a47a71868ff96abb6e15189b..899e6b7fb8a5f6a44d82058f49f7001c805ee835 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -538,6 +538,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;
+       unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
 
        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -561,6 +562,23 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                phead = NULL;
                hlen  = 0;
        }
+       if (skb_shinfo(skb)->nr_frags > usable_sge) {
+               if (skb_linearize(skb) < 0) {
+                       ipoib_warn(priv, "skb could not be linearized\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+               /* Does skb_linearize return ok without reducing nr_frags? */
+               if (skb_shinfo(skb)->nr_frags > usable_sge) {
+                       ipoib_warn(priv, "too many frags after skb linearize\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+       }
 
        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index d48c5bae78774663c17e72ed1e4c87475e1005ad..b809c373e40e54598a39bf555571ff80eba25277 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,7 +206,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
 
        if (dev->features & NETIF_F_SG)
-               init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+               init_attr.cap.max_send_sge =
+                       min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
 
        priv->qp = ib_create_qp(priv->pd, &init_attr);
        if (IS_ERR(priv->qp)) {
@@ -233,6 +234,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        priv->rx_wr.next = NULL;
        priv->rx_wr.sg_list = priv->rx_sge;
 
+       priv->max_send_sge = init_attr.cap.max_send_sge;
+
        return 0;
 
 out_free_send_cq:
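
Both QP-creation sites above follow the same clamp-and-readback pattern:
request no more send sges than the HCA advertises, then record what was
left in the init attributes after ib_create_qp() so the send path can
derive usable_sge from it.  A minimal consolidated sketch of that
pattern; create_sg_tx_qp() is a hypothetical name outside the IPoIB data
structures.

/*
 * Editorial sketch of the clamp-and-readback pattern used by
 * ipoib_cm_create_tx_qp() and ipoib_transport_dev_init() above.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>	/* MAX_SKB_FRAGS */
#include <rdma/ib_verbs.h>

static struct ib_qp *create_sg_tx_qp(struct ib_pd *pd, struct ib_device *ca,
				     struct ib_qp_init_attr *attr,
				     unsigned int *max_send_sge)
{
	struct ib_qp *qp;

	/* Never request more sges than the device reports. */
	attr->cap.max_send_sge =
		min_t(u32, ca->attrs.max_sge, MAX_SKB_FRAGS + 1);

	qp = ib_create_qp(pd, attr);
	if (!IS_ERR(qp))
		/* Save what was actually granted; the send path computes
		 * its per-skb budget (usable_sge) from this value.
		 */
		*max_send_sge = attr->cap.max_send_sge;

	return qp;
}

Reading attr.cap.max_send_sge back after ib_create_qp(), rather than
caching the requested value, keeps priv->max_send_sge and
tx->max_send_sge in line with whatever the QP was actually created with.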