 		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
-			__skb_queue_head(&priv->rx_recycle, skb);
+			skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);

 	struct gfar_private *priv = netdev_priv(dev);
 	struct sk_buff *skb = NULL;
-	skb = __skb_dequeue(&priv->rx_recycle);
+	skb = skb_dequeue(&priv->rx_recycle);
 	if (!skb)
 		skb = netdev_alloc_skb(dev,
 				priv->rx_buffer_size + RXBUF_ALIGNMENT);

 				 * recycle list.
 				 */
 				skb->data = skb->head + NET_SKB_PAD;
-				__skb_queue_head(&priv->rx_recycle, skb);
+				skb_queue_head(&priv->rx_recycle, skb);
 			}
 		} else {
 			/* Increment the number of packets */
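A brief aside, not part of the patch above: the only difference between the double-underscore helpers and the variants the patch switches to is that skb_queue_head() and skb_dequeue() take the queue's built-in spinlock (IRQ-safe) around each list operation, so a list such as priv->rx_recycle can be manipulated from more than one context without any external locking by the caller. A minimal sketch of that usage, with made-up names (recycle_q, recycle_put, recycle_get) purely for illustration:

#include <linux/skbuff.h>

/* One recycle list shared by the TX-cleanup and RX-refill paths. */
static struct sk_buff_head recycle_q;

static void recycle_init(void)
{
	/* Initializes the list head and its internal spinlock. */
	skb_queue_head_init(&recycle_q);
}

/* Safe from any context: skb_queue_head() wraps __skb_queue_head()
 * in spin_lock_irqsave() on the queue's own lock. */
static void recycle_put(struct sk_buff *skb)
{
	skb_queue_head(&recycle_q, skb);
}

/* Likewise locked; returns NULL when the list is empty. */
static struct sk_buff *recycle_get(void)
{
	return skb_dequeue(&recycle_q);
}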