Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Mon, 31 Mar 2014 20:56:43 +0000 (16:56 -0400)
committer David S. Miller <davem@davemloft.net>
          Mon, 31 Mar 2014 20:56:43 +0000 (16:56 -0400)
Conflicts:
drivers/net/xen-netback/netback.c

A bug fix overlapped with a change to how the netback SKB
control block is implemented.

Signed-off-by: David S. Miller <davem@davemloft.net>
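
The conflicted hunks below go through the new XENVIF_RX_CB() accessor. As a
point of reference, here is a minimal sketch of how such a control block is
carried in skb->cb; the layout is assumed from the accesses in this diff, and
the authoritative struct lives in drivers/net/xen-netback/netback.c:

	/* Sketch only; the real struct may hold more fields. */
	struct xenvif_rx_cb {
		int meta_slots_used;	/* set from xenvif_gop_skb() below */
	};

	#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)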
drivers/net/ieee802154/at86rf230.c
drivers/net/xen-netback/netback.c

diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e8004ef73bc1c6a900f57b741d1780391cad769f..89417ac41083e59c2c44512ef804a7861ef1d1cc 100644
@@ -1249,6 +1249,8 @@ static int at86rf230_remove(struct spi_device *spi)
        struct at86rf230_local *lp = spi_get_drvdata(spi);
        struct at86rf230_platform_data *pdata = spi->dev.platform_data;
 
+       /* mask all at86rf230 IRQs */
+       at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
        ieee802154_unregister_device(lp->dev);
 
        free_irq(spi->irq, lp);
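
The two added lines quiesce the radio before teardown proceeds. A short
sketch of the ordering this enforces (register semantics assumed; the driver
source is authoritative):

	/* Assumed remove-path ordering:
	 *   1. at86rf230_write_subreg(lp, SR_IRQ_MASK, 0)  - mask chip IRQs
	 *   2. ieee802154_unregister_device(lp->dev)       - detach from stack
	 *   3. free_irq(spi->irq, lp)                      - release IRQ line
	 * Masking first keeps a late interrupt from racing the unregister.
	 */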
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cb784fe5220cccbed4a62c33452d3c53655fa80d..ae34f5fc7fbc503f0feda999a0f729e5ab08cdc7 100644
@@ -191,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
         * into multiple copies tend to give large frags their
         * own buffers as before.
         */
-       if ((offset + size > MAX_BUFFER_OFFSET) &&
-           (size <= MAX_BUFFER_OFFSET) && offset && !head)
+       BUG_ON(size > MAX_BUFFER_OFFSET);
+       if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
                return true;
 
        return false;
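
To see what the simplified predicate accepts, here is a hedged userspace
restatement with a couple of checks. MAX_BUFFER_OFFSET is PAGE_SIZE in
netback.c; 4096 is assumed below, and assert() stands in for the new BUG_ON:

	#include <assert.h>
	#include <stdbool.h>

	#define MAX_BUFFER_OFFSET 4096 /* assumption: PAGE_SIZE */

	/* Standalone restatement of the predicate above. */
	static bool start_new_rx_buffer(int offset, unsigned long size, int head)
	{
		assert(size <= MAX_BUFFER_OFFSET); /* mirrors the new BUG_ON */
		return (offset + size > MAX_BUFFER_OFFSET) && offset && !head;
	}

	int main(void)
	{
		/* 3000 + 1500 overflows a 4096-byte buffer: start a new one. */
		assert(start_new_rx_buffer(3000, 1500, 0));
		/* A copy landing at offset 0 never starts a new buffer. */
		assert(!start_new_rx_buffer(0, 1500, 0));
		return 0;
	}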
@@ -511,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 
        while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
                RING_IDX max_slots_needed;
+               RING_IDX old_req_cons;
+               RING_IDX ring_slots_used;
                int i;
 
                /* We need a cheap worst-case estimate for the number of
@@ -522,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
                                                PAGE_SIZE);
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        unsigned int size;
+                       unsigned int offset;
+
                        size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-                       max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+                       offset = skb_shinfo(skb)->frags[i].page_offset;
+
+                       /* For a worst-case estimate we need to factor in
+                        * the fragment page offset as this will affect the
+                        * number of times xenvif_gop_frag_copy() will
+                        * call start_new_rx_buffer().
+                        */
+                       max_slots_needed += DIV_ROUND_UP(offset + size,
+                                                        PAGE_SIZE);
                }
+
+               /* To avoid the estimate becoming too pessimal for some
+                * frontends that limit posted rx requests, cap the estimate
+                * at MAX_SKB_FRAGS.
+                */
+               if (max_slots_needed > MAX_SKB_FRAGS)
+                       max_slots_needed = MAX_SKB_FRAGS;
+
+               /* We may need one more slot for GSO metadata */
                if (skb_is_gso(skb) &&
                   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
                    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
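
The effect of folding the page offset into the estimate is easiest to see
with numbers. A hedged userspace check, assuming 4096-byte pages:

	#include <assert.h>

	#define PAGE_SIZE 4096UL
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* A 100-byte frag that starts 50 bytes before a page boundary
		 * straddles two pages, so copying it can consume two slots. */
		unsigned long size = 100, offset = PAGE_SIZE - 50;

		assert(DIV_ROUND_UP(size, PAGE_SIZE) == 1);          /* old */
		assert(DIV_ROUND_UP(offset + size, PAGE_SIZE) == 2); /* new */
		return 0;
	}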
@@ -539,8 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
                } else
                        vif->rx_last_skb_slots = 0;
 
+               old_req_cons = vif->rx.req_cons;
                XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
-               BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
+               ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+               BUG_ON(ring_slots_used > max_slots_needed);
 
                __skb_queue_tail(&rxq, skb);
        }
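
The reworked BUG_ON compares slots actually consumed, taken as the advance of
the free-running req_cons index. A hedged sketch of why the unsigned
subtraction stays correct across counter wraparound (RING_IDX assumed to be a
32-bit unsigned type, as in Xen's ring.h):

	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t RING_IDX; /* assumption: matches Xen's typedef */

	int main(void)
	{
		RING_IDX old_req_cons = 0xfffffffeU;
		RING_IDX req_cons = old_req_cons + 3; /* wraps to 0x00000001 */

		/* Modular subtraction still yields the slots consumed. */
		assert(req_cons - old_req_cons == 3);
		return 0;
	}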