net: Remove unused netdev arg from some NAPI interfaces.
author Neil Horman <nhorman@tuxdriver.com>
Tue, 23 Dec 2008 04:43:12 +0000 (20:43 -0800)
committer David S. Miller <davem@davemloft.net>
Tue, 23 Dec 2008 04:43:12 +0000 (20:43 -0800)
When the NAPI API was changed to break its 1:1 binding to the net_device
struct, the netif_rx_[schedule_prep|schedule|complete] interfaces kept a now
vestigial net_device parameter.  This patch cleans up that API by properly
removing it.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
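
For orientation, here is a minimal sketch of how a typical driver's interrupt
and poll handlers change under this cleanup. The foo_* names and the foo_priv
layout are hypothetical; only the netif_rx_* calls follow the single-argument
prototypes used throughout the hunks below (and in the updated
include/linux/netdevice.h).

/*
 * Illustrative sketch only: the "foo" driver is hypothetical.
 * The commented-out forms show the pre-patch call sites.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *netdev;	/* no longer needed by the NAPI calls */
	struct napi_struct napi;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *fp = dev_id;

	/* Before: netif_rx_schedule_prep(fp->netdev, &fp->napi) */
	if (netif_rx_schedule_prep(&fp->napi)) {
		/* disable the device's RX interrupt here, then ... */

		/* Before: __netif_rx_schedule(fp->netdev, &fp->napi) */
		__netif_rx_schedule(&fp->napi);
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = 0;

	/* ... clean up to 'budget' RX packets, counting them in work_done ... */

	if (work_done < budget) {
		/* Before: netif_rx_complete(fp->netdev, napi) */
		netif_rx_complete(napi);
		/* re-enable the device's RX interrupt here */
	}
	return work_done;
}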
66 files changed:
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/net/8139cp.c
drivers/net/8139too.c
drivers/net/amd8111e.c
drivers/net/arm/ep93xx_eth.c
drivers/net/arm/ixp4xx_eth.c
drivers/net/atl1e/atl1e_main.c
drivers/net/b44.c
drivers/net/bnx2.c
drivers/net/bnx2x_main.c
drivers/net/cassini.c
drivers/net/chelsio/sge.c
drivers/net/cpmac.c
drivers/net/e100.c
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/netdev.c
drivers/net/ehea/ehea_main.c
drivers/net/enic/enic_main.c
drivers/net/epic100.c
drivers/net/forcedeth.c
drivers/net/fs_enet/fs_enet-main.c
drivers/net/gianfar.c
drivers/net/ibmveth.c
drivers/net/igb/igb_main.c
drivers/net/ixgb/ixgb_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixp2000/ixpdev.c
drivers/net/jme.c
drivers/net/jme.h
drivers/net/korina.c
drivers/net/macb.c
drivers/net/mlx4/en_rx.c
drivers/net/myri10ge/myri10ge.c
drivers/net/natsemi.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/niu.c
drivers/net/pasemi_mac.c
drivers/net/pcnet32.c
drivers/net/qla3xxx.c
drivers/net/qlge/qlge_main.c
drivers/net/r6040.c
drivers/net/r8169.c
drivers/net/s2io.c
drivers/net/sb1250-mac.c
drivers/net/sfc/efx.c
drivers/net/sfc/efx.h
drivers/net/skge.c
drivers/net/smsc911x.c
drivers/net/smsc9420.c
drivers/net/spider_net.c
drivers/net/starfire.c
drivers/net/sungem.c
drivers/net/tc35815.c
drivers/net/tehuti.c
drivers/net/tg3.c
drivers/net/tsi108_eth.c
drivers/net/tulip/interrupt.c
drivers/net/typhoon.c
drivers/net/ucc_geth.c
drivers/net/via-rhine.c
drivers/net/virtio_net.c
drivers/net/wan/hd64572.c
drivers/net/xen-netfront.c
include/linux/netdevice.h

index 7c49cc882d75c580c0c31e20b987cf177d04fc9c..735c125b48af1073c62e1f924e88cf52c39be72e 100644 (file)
@@ -2541,7 +2541,7 @@ static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic
 {
        struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
 
-       netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi);
+       netif_rx_schedule(&nesvnic->napi);
 }
 
 
index 3c96203e0d915ff3ffff6347bc182bfb9e89d5cb..80e7a4d98d5ba15443933cdb48f8085040b3cd8c 100644 (file)
@@ -112,7 +112,7 @@ static int nes_netdev_poll(struct napi_struct *napi, int budget)
        nes_nic_ce_handler(nesdev, nescq);
 
        if (nescq->cqes_pending == 0) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                /* clear out completed cqes and arm */
                nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
                                nescq->cq_number | (nescq->cqe_allocs_pending << 16));
index 28eb6f03c588987c1c417377205e53b34b2f5058..a1925810be3cb23f8e768b030c60d68944267b5f 100644 (file)
@@ -446,11 +446,11 @@ poll_more:
                if (dev->features & NETIF_F_LRO)
                        lro_flush_all(&priv->lro.lro_mgr);
 
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
-                   netif_rx_reschedule(dev, napi))
+                   netif_rx_reschedule(napi))
                        goto poll_more;
        }
 
@@ -462,7 +462,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-       netif_rx_schedule(dev, &priv->napi);
+       netif_rx_schedule(&priv->napi);
 }
 
 static void drain_tx_cq(struct net_device *dev)
index f6d9d1353dd54144aebb8541d85c572f073fac25..dd7ac8290aecfc7323554e90ab0850fe64713346 100644 (file)
@@ -604,7 +604,7 @@ rx_next:
 
                spin_lock_irqsave(&cp->lock, flags);
                cpw16_f(IntrMask, cp_intr_mask);
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
                spin_unlock_irqrestore(&cp->lock, flags);
        }
 
@@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        }
 
        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
-               if (netif_rx_schedule_prep(dev, &cp->napi)) {
+               if (netif_rx_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
-                       __netif_rx_schedule(dev, &cp->napi);
+                       __netif_rx_schedule(&cp->napi);
                }
 
        if (status & (TxOK | TxErr | TxEmpty | SWInt))
index 67bbf4f25beac14d425eaf79ff6fb00b715a063e..fe370f8057933244e544cd5849e064e514edaa11 100644 (file)
@@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
                 */
                spin_lock_irqsave(&tp->lock, flags);
                RTL_W16_F(IntrMask, rtl8139_intr_mask);
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
                spin_unlock_irqrestore(&tp->lock, flags);
        }
        spin_unlock(&tp->rx_lock);
@@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
        /* Receive packets are processed by poll routine.
           If not running start it now. */
        if (status & RxAckBits){
-               if (netif_rx_schedule_prep(dev, &tp->napi)) {
+               if (netif_rx_schedule_prep(&tp->napi)) {
                        RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
-                       __netif_rx_schedule(dev, &tp->napi);
+                       __netif_rx_schedule(&tp->napi);
                }
        }
 
index 0bc4f54d5db95861ecb6ff9cbe68b71df0bd370c..187ac6eb6e945e69f8fca9cd87e1161dd1ceb919 100644 (file)
@@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
        if (rx_pkt_limit > 0) {
                /* Receive descriptor is empty now */
                spin_lock_irqsave(&lp->lock, flags);
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
                writel(VAL0|RINTEN0, mmio + INTEN0);
                writel(VAL2 | RDMD0, mmio + CMD0);
                spin_unlock_irqrestore(&lp->lock, flags);
@@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
 
        /* Check if Receive Interrupt has occurred. */
        if (intr0 & RINT0) {
-               if (netif_rx_schedule_prep(dev, &lp->napi)) {
+               if (netif_rx_schedule_prep(&lp->napi)) {
                        /* Disable receive interupts */
                        writel(RINTEN0, mmio + INTEN0);
                        /* Schedule a polling routine */
-                       __netif_rx_schedule(dev, &lp->napi);
+                       __netif_rx_schedule(&lp->napi);
                } else if (intren0 & RINTEN0) {
                        printk("************Driver bug! \
                                interrupt while in poll\n");
index 588c9739d13d2891ebbd0d2df7e873ecefc834db..6ecc600c1bccd05c5e22e17af5248fe998e9ecb0 100644 (file)
@@ -298,7 +298,7 @@ poll_some_more:
                int more = 0;
 
                spin_lock_irq(&ep->rx_lock);
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
                wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
                if (ep93xx_have_more_rx(ep)) {
                        wrl(ep, REG_INTEN, REG_INTEN_TX);
@@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
        if (status & REG_INTSTS_RX) {
                spin_lock(&ep->rx_lock);
-               if (likely(netif_rx_schedule_prep(dev, &ep->napi))) {
+               if (likely(netif_rx_schedule_prep(&ep->napi))) {
                        wrl(ep, REG_INTEN, REG_INTEN_TX);
-                       __netif_rx_schedule(dev, &ep->napi);
+                       __netif_rx_schedule(&ep->napi);
                }
                spin_unlock(&ep->rx_lock);
        }
index 14ffa2a61890d12df652841e0f6d16b5278436a5..b03609f2e90fda781cf288b5f51922ae9e21a7fe 100644 (file)
@@ -498,7 +498,7 @@ static void eth_rx_irq(void *pdev)
        printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
 #endif
        qmgr_disable_irq(port->plat->rxq);
-       netif_rx_schedule(dev, &port->napi);
+       netif_rx_schedule(&port->napi);
 }
 
 static int eth_poll(struct napi_struct *napi, int budget)
@@ -526,7 +526,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
                        printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
                               dev->name);
 #endif
-                       netif_rx_complete(dev, napi);
+                       netif_rx_complete(napi);
                        qmgr_enable_irq(rxq);
                        if (!qmgr_stat_empty(rxq) &&
                            netif_rx_reschedule(dev, napi)) {
@@ -1025,7 +1025,7 @@ static int eth_open(struct net_device *dev)
        }
        ports_open++;
        /* we may already have RX data, enables IRQ */
-       netif_rx_schedule(dev, &port->napi);
+       netif_rx_schedule(&port->napi);
        return 0;
 }
 
index 98b2a7a466b8c3f9377a1812ab551f0c59000995..a72a46145ed78ef896b07ddd83aaf1e463093535 100644 (file)
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
                        AT_WRITE_REG(hw, REG_IMR,
                                     IMR_NORMAL_MASK & ~ISR_RX_EVENT);
                        AT_WRITE_FLUSH(hw);
-                       if (likely(netif_rx_schedule_prep(netdev,
+                       if (likely(netif_rx_schedule_prep(
                                   &adapter->napi)))
-                               __netif_rx_schedule(netdev, &adapter->napi);
+                               __netif_rx_schedule(&adapter->napi);
                }
        } while (--max_ints > 0);
        /* re-enable Interrupt*/
@@ -1515,7 +1515,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
        /* If no Tx and not enough Rx work done, exit the polling mode */
        if (work_done < budget) {
 quit_polling:
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
                AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
                /* test debug */
index 2c7a32eb92a5c1ffdaa33f3fd008427d47192884..934a95091dc3d72176ff00c0fdc95c19bd2cd5f3 100644 (file)
@@ -875,7 +875,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                b44_enable_ints(bp);
        }
 
@@ -907,13 +907,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
                        goto irq_ack;
                }
 
-               if (netif_rx_schedule_prep(dev, &bp->napi)) {
+               if (netif_rx_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
-                       __netif_rx_schedule(dev, &bp->napi);
+                       __netif_rx_schedule(&bp->napi);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
index 1a2780374a49b2c52385e511345ca7dd06d8f758..33d69ddc90a3c26797d34e3d59662f10c0e3ae1a 100644 (file)
@@ -3043,7 +3043,6 @@ bnx2_msi(int irq, void *dev_instance)
 {
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
-       struct net_device *dev = bp->dev;
 
        prefetch(bnapi->status_blk.msi);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
@@ -3054,7 +3053,7 @@ bnx2_msi(int irq, void *dev_instance)
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;
 
-       netif_rx_schedule(dev, &bnapi->napi);
+       netif_rx_schedule(&bnapi->napi);
 
        return IRQ_HANDLED;
 }
@@ -3064,7 +3063,6 @@ bnx2_msi_1shot(int irq, void *dev_instance)
 {
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
-       struct net_device *dev = bp->dev;
 
        prefetch(bnapi->status_blk.msi);
 
@@ -3072,7 +3070,7 @@ bnx2_msi_1shot(int irq, void *dev_instance)
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;
 
-       netif_rx_schedule(dev, &bnapi->napi);
+       netif_rx_schedule(&bnapi->napi);
 
        return IRQ_HANDLED;
 }
@@ -3082,7 +3080,6 @@ bnx2_interrupt(int irq, void *dev_instance)
 {
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
-       struct net_device *dev = bp->dev;
        struct status_block *sblk = bnapi->status_blk.msi;
 
        /* When using INTx, it is possible for the interrupt to arrive
@@ -3109,9 +3106,9 @@ bnx2_interrupt(int irq, void *dev_instance)
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;
 
-       if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
+       if (netif_rx_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
-               __netif_rx_schedule(dev, &bnapi->napi);
+               __netif_rx_schedule(&bnapi->napi);
        }
 
        return IRQ_HANDLED;
@@ -3221,7 +3218,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {
 
-                       netif_rx_complete(bp->dev, napi);
+                       netif_rx_complete(napi);
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
@@ -3254,7 +3251,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
-                       netif_rx_complete(bp->dev, napi);
+                       netif_rx_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
index 24d2ae8b74bf1c522fc52efc86b4bd647c3457b6..02ab9b0ea697bd6123fdbb18bc6f3b81c1ea978e 100644 (file)
@@ -1615,7 +1615,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);
 
-       netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
+       netif_rx_schedule(&bnx2x_fp(bp, index, napi));
 
        return IRQ_HANDLED;
 }
@@ -1654,7 +1654,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);
 
-               netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
+               netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
 
                status &= ~mask;
        }
@@ -9284,7 +9284,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 #ifdef BNX2X_STOP_ON_ERROR
 poll_panic:
 #endif
-               netif_rx_complete(bp->dev, napi);
+               netif_rx_complete(napi);
 
                bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
                             le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
index 023d205e90543c0ebcb37137023a2d9c9cc7435c..321f43d9f0e2caf56a2fb4263e338bc34a80f94e 100644 (file)
@@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
        if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 #ifdef USE_NAPI
                cas_mask_intr(cp);
-               netif_rx_schedule(dev, &cp->napi);
+               netif_rx_schedule(&cp->napi);
 #else
                cas_rx_ringN(cp, ring, 0);
 #endif
@@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id)
        if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 #ifdef USE_NAPI
                cas_mask_intr(cp);
-               netif_rx_schedule(dev, &cp->napi);
+               netif_rx_schedule(&cp->napi);
 #else
                cas_rx_ringN(cp, 1, 0);
 #endif
@@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id)
        if (status & INTR_RX_DONE) {
 #ifdef USE_NAPI
                cas_mask_intr(cp);
-               netif_rx_schedule(dev, &cp->napi);
+               netif_rx_schedule(&cp->napi);
 #else
                cas_rx_ringN(cp, 0, 0);
 #endif
@@ -2691,7 +2691,7 @@ rx_comp:
 #endif
        spin_unlock_irqrestore(&cp->lock, flags);
        if (enable_intr) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                cas_unmask_intr(cp);
        }
        return credits;
index 1da70070c2fa6626400de6df033fa30467f434cd..7896468dda119786f36f0f158a3edebfeed58928 100644 (file)
@@ -1613,7 +1613,7 @@ int t1_poll(struct napi_struct *napi, int budget)
        int work_done = process_responses(adapter, budget);
 
        if (likely(work_done < budget)) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                writel(adapter->sge->respQ.cidx,
                       adapter->regs + A_SG_SLEEPING);
        }
@@ -1633,7 +1633,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
 
                if (napi_schedule_prep(&adapter->napi)) {
                        if (process_pure_responses(adapter))
-                               __netif_rx_schedule(dev, &adapter->napi);
+                               __netif_rx_schedule(&adapter->napi);
                        else {
                                /* no data, no NAPI needed */
                                writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
index d39a77cba1afe0c70214bd02b78df9ea8333a1e2..f66548751c384c1bf111f69b995dd701aa60b713 100644 (file)
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
                        printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                               priv->dev->name);
                spin_unlock(&priv->rx_lock);
-               netif_rx_complete(priv->dev, napi);
+               netif_rx_complete(napi);
                return 0;
        }
 
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
        if (processed == 0) {
                /* we ran out of packets to read,
                 * revert to interrupt-driven mode */
-               netif_rx_complete(priv->dev, napi);
+               netif_rx_complete(napi);
                cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
                return 0;
        }
@@ -536,7 +536,7 @@ fatal_error:
        }
 
        spin_unlock(&priv->rx_lock);
-       netif_rx_complete(priv->dev, napi);
+       netif_rx_complete(napi);
        netif_tx_stop_all_queues(priv->dev);
        napi_disable(&priv->napi);
 
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 
        if (status & MAC_INT_RX) {
                queue = (status >> 8) & 7;
-               if (netif_rx_schedule_prep(dev, &priv->napi)) {
+               if (netif_rx_schedule_prep(&priv->napi)) {
                        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
-                       __netif_rx_schedule(dev, &priv->napi);
+                       __netif_rx_schedule(&priv->napi);
                }
        }
 
index dce7ff28c3ff4087b56ada70aff23ec29f90866d..9f38b16ccbbd1b58f308225d15e59f94f1585a1c 100644 (file)
@@ -2049,9 +2049,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
        if(stat_ack & stat_ack_rnr)
                nic->ru_running = RU_SUSPENDED;
 
-       if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
+       if(likely(netif_rx_schedule_prep(&nic->napi))) {
                e100_disable_irq(nic);
-               __netif_rx_schedule(netdev, &nic->napi);
+               __netif_rx_schedule(&nic->napi);
        }
 
        return IRQ_HANDLED;
@@ -2060,7 +2060,6 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
 static int e100_poll(struct napi_struct *napi, int budget)
 {
        struct nic *nic = container_of(napi, struct nic, napi);
-       struct net_device *netdev = nic->netdev;
        unsigned int work_done = 0;
 
        e100_rx_clean(nic, &work_done, budget);
@@ -2068,7 +2067,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                e100_enable_irq(nic);
        }
 
index 116c96e0b1191278926b02518669de024e630f6f..26474c92193f705a3b66e413fa5773da38ce87f2 100644 (file)
@@ -3687,12 +3687,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+       if (likely(netif_rx_schedule_prep(&adapter->napi))) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        } else
                e1000_irq_enable(adapter);
 
@@ -3747,12 +3747,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
                ew32(IMC, ~0);
                E1000_WRITE_FLUSH();
        }
-       if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+       if (likely(netif_rx_schedule_prep(&adapter->napi))) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        } else
                /* this really should not happen! if it does it is basically a
                 * bug, but not a hard error, so enable ints and continue */
@@ -3793,7 +3793,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                if (likely(adapter->itr_setting & 3))
                        e1000_set_itr(adapter);
-               netif_rx_complete(poll_dev, napi);
+               netif_rx_complete(napi);
                e1000_irq_enable(adapter);
        }
 
index f7b05609073df59edd4f8065c1c1f039affbf15b..d4639facd1bddcb72a200ada597d02fc284529e7 100644 (file)
@@ -1179,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+       if (netif_rx_schedule_prep(&adapter->napi)) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        }
 
        return IRQ_HANDLED;
@@ -1246,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+       if (netif_rx_schedule_prep(&adapter->napi)) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        }
 
        return IRQ_HANDLED;
@@ -1320,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
                adapter->rx_ring->set_itr = 0;
        }
 
-       if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+       if (netif_rx_schedule_prep(&adapter->napi)) {
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        }
        return IRQ_HANDLED;
 }
@@ -2028,7 +2028,7 @@ clean_rx:
        if (work_done < budget) {
                if (adapter->itr_setting & 3)
                        e1000_set_itr(adapter);
-               netif_rx_complete(poll_dev, napi);
+               netif_rx_complete(napi);
                if (adapter->msix_entries)
                        ew32(IMS, adapter->rx_ring->ims_val);
                else
index 44c9ae18383f837056a969d1775d9d28f7ecc7e6..035aa7dfc5cd867e40f72f5bd06ef2b37cb01cf1 100644 (file)
@@ -830,7 +830,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
        while ((rx != budget) || force_irq) {
                pr->poll_counter = 0;
                force_irq = 0;
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
@@ -859,7 +859,7 @@ static void ehea_netpoll(struct net_device *dev)
        int i;
 
        for (i = 0; i < port->num_def_qps; i++)
-               netif_rx_schedule(dev, &port->port_res[i].napi);
+               netif_rx_schedule(&port->port_res[i].napi);
 }
 #endif
 
@@ -867,7 +867,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
        struct ehea_port_res *pr = param;
 
-       netif_rx_schedule(pr->port->netdev, &pr->napi);
+       netif_rx_schedule(&pr->napi);
 
        return IRQ_HANDLED;
 }
index deddd76a550cbb0f2958a869daa2e5f25038c09e..d039e16f276355a1552e4b6be6a7aecdf1bd0b66 100644 (file)
@@ -411,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
        }
 
        if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
-               if (netif_rx_schedule_prep(netdev, &enic->napi))
-                       __netif_rx_schedule(netdev, &enic->napi);
+               if (netif_rx_schedule_prep(&enic->napi))
+                       __netif_rx_schedule(&enic->napi);
        } else {
                vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
        }
@@ -440,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
         * writes).
         */
 
-       netif_rx_schedule(enic->netdev, &enic->napi);
+       netif_rx_schedule(&enic->napi);
 
        return IRQ_HANDLED;
 }
@@ -450,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
        struct enic *enic = data;
 
        /* schedule NAPI polling for RQ cleanup */
-       netif_rx_schedule(enic->netdev, &enic->napi);
+       netif_rx_schedule(&enic->napi);
 
        return IRQ_HANDLED;
 }
@@ -1068,7 +1068,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                if (netdev->features & NETIF_F_LRO)
                        lro_flush_all(&enic->lro_mgr);
 
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
        }
 
@@ -1112,7 +1112,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                if (netdev->features & NETIF_F_LRO)
                        lro_flush_all(&enic->lro_mgr);
 
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
        }
 
index 4a951b8cb4d73cb1db4ca264efb895a6b6529685..f9b37c80dda61f7eb84ca32a26dc93789d57c678 100644 (file)
@@ -1109,9 +1109,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
 
        if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
                spin_lock(&ep->napi_lock);
-               if (netif_rx_schedule_prep(dev, &ep->napi)) {
+               if (netif_rx_schedule_prep(&ep->napi)) {
                        epic_napi_irq_off(dev, ep);
-                       __netif_rx_schedule(dev, &ep->napi);
+                       __netif_rx_schedule(&ep->napi);
                } else
                        ep->reschedule_in_poll++;
                spin_unlock(&ep->napi_lock);
@@ -1288,7 +1288,7 @@ rx_action:
 
                more = ep->reschedule_in_poll;
                if (!more) {
-                       __netif_rx_complete(dev, napi);
+                       __netif_rx_complete(napi);
                        outl(EpicNapiEvent, ioaddr + INTSTAT);
                        epic_napi_irq_on(dev, ep);
                } else
index 1f2b24743ee9ec0fc1a76c893e7bfa023940f218..9fbfa856ae5b27937a00eb822b0edef7aaabc0ab 100644 (file)
@@ -1760,7 +1760,7 @@ static void nv_do_rx_refill(unsigned long data)
        struct fe_priv *np = netdev_priv(dev);
 
        /* Just reschedule NAPI rx processing */
-       netif_rx_schedule(dev, &np->napi);
+       netif_rx_schedule(&np->napi);
 }
 #else
 static void nv_do_rx_refill(unsigned long data)
@@ -3403,7 +3403,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
 #ifdef CONFIG_FORCEDETH_NAPI
                if (events & NVREG_IRQ_RX_ALL) {
-                       netif_rx_schedule(dev, &np->napi);
+                       netif_rx_schedule(&np->napi);
 
                        /* Disable furthur receive irq's */
                        spin_lock(&np->lock);
@@ -3520,7 +3520,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
 #ifdef CONFIG_FORCEDETH_NAPI
                if (events & NVREG_IRQ_RX_ALL) {
-                       netif_rx_schedule(dev, &np->napi);
+                       netif_rx_schedule(&np->napi);
 
                        /* Disable furthur receive irq's */
                        spin_lock(&np->lock);
@@ -3678,7 +3678,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
                /* re-enable receive interrupts */
                spin_lock_irqsave(&np->lock, flags);
 
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
 
                np->irqmask |= NVREG_IRQ_RX_ALL;
                if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3704,7 +3704,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
        writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
 
        if (events) {
-               netif_rx_schedule(dev, &np->napi);
+               netif_rx_schedule(&np->napi);
                /* disable receive interrupts on the nic */
                writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
                pci_push(base);
index df66d620b11555385642935d1a73f651981b52b0..4e6a9195fe5f6503e5bd609098a203186fab936c 100644 (file)
@@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 
        if (received < budget) {
                /* done */
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                (*fep->ops->napi_enable_rx)(dev);
        }
        return received;
@@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id)
                                /* NOTE: it is possible for FCCs in NAPI mode    */
                                /* to submit a spurious interrupt while in poll  */
                                if (napi_ok)
-                                       __netif_rx_schedule(dev, &fep->napi);
+                                       __netif_rx_schedule(&fep->napi);
                        }
                }
 
index 13f49643ba0bed2d59443af6c14d5988371a88e2..c672ecfc95957f24cc3cb0f358bface91b0a2fbd 100644 (file)
@@ -1607,9 +1607,9 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       if (netif_rx_schedule_prep(dev, &priv->napi)) {
+       if (netif_rx_schedule_prep(&priv->napi)) {
                gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-               __netif_rx_schedule(dev, &priv->napi);
+               __netif_rx_schedule(&priv->napi);
        }
 }
 
@@ -1863,7 +1863,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                return budget;
 
        if (rx_cleaned < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
index 02ecfdb4df6b796d54297efb99cea723f92e0676..1f055a9550896824656b5aeb8f7d42b036c3bb0b 100644 (file)
@@ -1028,7 +1028,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 
                ibmveth_assert(lpar_rc == H_SUCCESS);
 
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
 
                if (ibmveth_rxq_pending_buffer(adapter) &&
                    netif_rx_reschedule(netdev, napi)) {
@@ -1047,11 +1047,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        unsigned long lpar_rc;
 
-       if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+       if (netif_rx_schedule_prep(&adapter->napi)) {
                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                       VIO_IRQ_DISABLE);
                ibmveth_assert(lpar_rc == H_SUCCESS);
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        }
        return IRQ_HANDLED;
 }
index 25df7c9310647080c5da278b798c613fca408374..6a40d9486daf0ed61a4c1dd50a59aae11df9999a 100644 (file)
@@ -3347,8 +3347,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
 
        igb_write_itr(rx_ring);
 
-       if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
-               __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
+       if (netif_rx_schedule_prep(&rx_ring->napi))
+               __netif_rx_schedule(&rx_ring->napi);
 
 #ifdef CONFIG_IGB_DCA
        if (adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3500,7 +3500,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+       netif_rx_schedule(&adapter->rx_ring[0].napi);
 
        return IRQ_HANDLED;
 }
@@ -3538,7 +3538,7 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+       netif_rx_schedule(&adapter->rx_ring[0].napi);
 
        return IRQ_HANDLED;
 }
@@ -3573,7 +3573,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
            !netif_running(netdev)) {
                if (adapter->itr_setting & 3)
                        igb_set_itr(adapter);
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                if (!test_bit(__IGB_DOWN, &adapter->state))
                        igb_irq_enable(adapter);
                return 0;
@@ -3599,7 +3599,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 
        /* If not enough Rx work done, exit the polling mode */
        if ((work_done == 0) || !netif_running(netdev)) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
 
                if (adapter->itr_setting & 3) {
                        if (adapter->num_rx_queues == 1)
index 820a92cc7f620d7895fbe87b39b9a5db4603399c..679125b3bbc9c44210f4026b867aa791eec10d36 100644 (file)
@@ -1721,14 +1721,14 @@ ixgb_intr(int irq, void *data)
                if (!test_bit(__IXGB_DOWN, &adapter->flags))
                        mod_timer(&adapter->watchdog_timer, jiffies);
 
-       if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+       if (netif_rx_schedule_prep(&adapter->napi)) {
 
                /* Disable interrupts and register for poll. The flush
                  of the posted write is intentionally left out.
                */
 
                IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
-               __netif_rx_schedule(netdev, &adapter->napi);
+               __netif_rx_schedule(&adapter->napi);
        }
        return IRQ_HANDLED;
 }
@@ -1750,7 +1750,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                if (!test_bit(__IXGB_DOWN, &adapter->flags))
                        ixgb_irq_enable(adapter);
        }
index 92b35cfc7a465dfe4e060c03b2fc7af16ae22029..b6ae9f674ba539e210e4c59e77f417ac62cacfcb 100644 (file)
@@ -1012,7 +1012,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
-       netif_rx_schedule(adapter->netdev, &q_vector->napi);
+       netif_rx_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
@@ -1053,7 +1053,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
-               netif_rx_complete(adapter->netdev, napi);
+               netif_rx_complete(napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1102,7 +1102,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
-               netif_rx_complete(adapter->netdev, napi);
+               netif_rx_complete(napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1378,13 +1378,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
        ixgbe_check_fan_failure(adapter, eicr);
 
-       if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
+       if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
-               __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
+               __netif_rx_schedule(&adapter->q_vector[0].napi);
        }
 
        return IRQ_HANDLED;
@@ -2308,7 +2308,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               netif_rx_complete(adapter->netdev, napi);
+               netif_rx_complete(napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr(adapter);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
index bd96dbc8e0219c459587f0df9e640a67c8863254..014745720560df448b6654c0f051093505838e46 100644 (file)
@@ -141,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget)
                        break;
        } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
 
-       netif_rx_complete(dev, napi);
+       netif_rx_complete(napi);
        ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
 
        return rx;
@@ -204,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
 
                ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
                if (likely(napi_schedule_prep(&ip->napi))) {
-                       __netif_rx_schedule(dev, &ip->napi);
+                       __netif_rx_schedule(&ip->napi);
                } else {
                        printk(KERN_CRIT "ixp2000: irq while polling!!\n");
                }
index 15035cb1738a44b24dce5f10089fa2a794713576..08b34051c646d6e4028bb30847ad793c84aedd00 100644 (file)
@@ -1250,7 +1250,6 @@ static int
 jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 {
        struct jme_adapter *jme = jme_napi_priv(holder);
-       struct net_device *netdev = jme->dev;
        int rest;
 
        rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
index adaf3ddbf7835387bf6bca79f024d65919739ccb..2d6f30e638fca290dc599e009f80df8be826c90a 100644 (file)
@@ -398,15 +398,15 @@ struct jme_ring {
 #define JME_NAPI_WEIGHT(w) int w
 #define JME_NAPI_WEIGHT_VAL(w) w
 #define JME_NAPI_WEIGHT_SET(w, r)
-#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis)
 #define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
 #define JME_NAPI_DISABLE(priv) \
        if (!napi_disable_pending(&priv->napi)) \
                napi_disable(&priv->napi);
 #define JME_RX_SCHEDULE_PREP(priv) \
-       netif_rx_schedule_prep(priv->dev, &priv->napi)
+       netif_rx_schedule_prep(&priv->napi)
 #define JME_RX_SCHEDULE(priv) \
-       __netif_rx_schedule(priv->dev, &priv->napi);
+       __netif_rx_schedule(&priv->napi);
 
 /*
  * Jmac Adapter Private data
index 63626953f07e471780e4ff831496e6c9348a6d6f..4a5580c1126a00b383e4588b4ab6a2fa5376845e 100644 (file)
@@ -327,7 +327,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
 
        dmas = readl(&lp->rx_dma_regs->dmas);
        if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
-               netif_rx_schedule_prep(dev, &lp->napi);
+               netif_rx_schedule_prep(&lp->napi);
 
                dmasm = readl(&lp->rx_dma_regs->dmasm);
                writel(dmasm | (DMA_STAT_DONE |
@@ -466,7 +466,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
 
        work_done = korina_rx(dev, budget);
        if (work_done < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
                writel(readl(&lp->rx_dma_regs->dmasm) &
                        ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
index 261b9507124b695e0ecf8fe91a5ea5cfae8e7f59..a04da4ecaa8811a8be09450389e33288ebe2d80e 100644 (file)
@@ -519,7 +519,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
                 * this function was called last time, and no packets
                 * have been received since.
                 */
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                goto out;
        }
 
@@ -530,13 +530,13 @@ static int macb_poll(struct napi_struct *napi, int budget)
                dev_warn(&bp->pdev->dev,
                         "No RX buffers complete, status = %02lx\n",
                         (unsigned long)status);
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                goto out;
        }
 
        work_done = macb_rx(bp, budget);
        if (work_done < budget)
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
        /*
         * We've done what we can to clean the buffers. Make sure we
@@ -571,7 +571,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                }
 
                if (status & MACB_RX_INT_FLAGS) {
-                       if (netif_rx_schedule_prep(dev, &bp->napi)) {
+                       if (netif_rx_schedule_prep(&bp->napi)) {
                                /*
                                 * There's no point taking any more interrupts
                                 * until we have processed the buffers
@@ -579,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                                macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
                                dev_dbg(&bp->pdev->dev,
                                        "scheduling RX softirq\n");
-                               __netif_rx_schedule(dev, &bp->napi);
+                               __netif_rx_schedule(&bp->napi);
                        }
                }
 
index ffe28089b6873041a978b41b57a3520c66942157..c61b0bdca1a433c33771784d88fb8f6eae3f092d 100644 (file)
@@ -814,7 +814,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 
        if (priv->port_up)
-               netif_rx_schedule(cq->dev, &cq->napi);
+               netif_rx_schedule(&cq->napi);
        else
                mlx4_en_arm_cq(priv, cq);
 }
@@ -834,7 +834,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
                INC_PERF_COUNTER(priv->pstats.napi_quota);
        else {
                /* Done for now */
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
        return done;
index f017c774e1a4c8ef58a73e1c3126e0a2431fe7ef..378c89e6c7d52168b19353fea1fac84bcd439a41 100644 (file)
@@ -1515,7 +1515,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
        work_done = myri10ge_clean_rx_done(ss, budget);
 
        if (work_done < budget) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                put_be32(htonl(3), ss->irq_claim);
        }
        return work_done;
@@ -1533,7 +1533,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
        /* an interrupt on a non-zero receive-only slice is implicitly
         * valid  since MSI-X irqs are not shared */
        if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
-               netif_rx_schedule(ss->dev, &ss->napi);
+               netif_rx_schedule(&ss->napi);
                return (IRQ_HANDLED);
        }
 
@@ -1544,7 +1544,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
        /* low bit indicates receives are present, so schedule
         * napi poll handler */
        if (stats->valid & 1)
-               netif_rx_schedule(ss->dev, &ss->napi);
+               netif_rx_schedule(&ss->napi);
 
        if (!mgp->msi_enabled && !mgp->msix_enabled) {
                put_be32(0, mgp->irq_deassert);
index 9f81fcb968824386efa12fe3fe97967c2dc02fdd..478edb92bca35581509f80a7709354c00a2f3d17 100644 (file)
@@ -2193,10 +2193,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 
        prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
 
-       if (netif_rx_schedule_prep(dev, &np->napi)) {
+       if (netif_rx_schedule_prep(&np->napi)) {
                /* Disable interrupts and register for poll */
                natsemi_irq_disable(dev);
-               __netif_rx_schedule(dev, &np->napi);
+               __netif_rx_schedule(&np->napi);
        } else
                printk(KERN_WARNING
                       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
@@ -2248,7 +2248,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
                np->intr_status = readl(ioaddr + IntrStatus);
        } while (np->intr_status);
 
-       netif_rx_complete(dev, napi);
+       netif_rx_complete(napi);
 
        /* Reenable interrupts providing nothing is trying to shut
         * the chip down. */
index 6876bfd4455aa3a8c3e506bb878426202678955c..ba01524b5531a46d7ced8efd6470ad825601c23e 100644 (file)
@@ -1583,7 +1583,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
        }
 
        if ((work_done < budget) && tx_complete) {
-               netif_rx_complete(adapter->netdev, &adapter->napi);
+               netif_rx_complete(&adapter->napi);
                netxen_nic_enable_int(adapter);
        }
 
index f219f16ec97a25b65f76c43928ca1f59a2ff077a..5698c155bbf3ffa473891ad15f448fd2db00e899 100644 (file)
@@ -3669,7 +3669,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
        work_done = niu_poll_core(np, lp, budget);
 
        if (work_done < budget) {
-               netif_rx_complete(np->dev, napi);
+               netif_rx_complete(napi);
                niu_ldg_rearm(np, lp, 1);
        }
        return work_done;
@@ -4088,12 +4088,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
                              u64 v0, u64 v1, u64 v2)
 {
-       if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
+       if (likely(netif_rx_schedule_prep(&lp->napi))) {
                lp->v0 = v0;
                lp->v1 = v1;
                lp->v2 = v2;
                __niu_fastpath_interrupt(np, lp->ldg_num, v0);
-               __netif_rx_schedule(np->dev, &lp->napi);
+               __netif_rx_schedule(&lp->napi);
        }
 }
 
index fcbf6ccd0a85480125d63baef07b1a283abf9752..dcd199045613f3bbc4921ae2bee71d92f743ff62 100644 (file)
@@ -971,7 +971,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
 
-       netif_rx_schedule(dev, &mac->napi);
+       netif_rx_schedule(&mac->napi);
 
        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
 
@@ -1011,7 +1011,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
 
        mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
 
-       netif_rx_schedule(mac->netdev, &mac->napi);
+       netif_rx_schedule(&mac->napi);
 
        if (reg)
                write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
@@ -1641,7 +1641,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
                pasemi_mac_restart_rx_intr(mac);
                pasemi_mac_restart_tx_intr(mac);
index f2b192c80e175b5dc06e8c215e8f00a4184a2dbe..044b7b07f5f48cf83ebbb8c7adce3134a3470f04 100644 (file)
@@ -1397,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                spin_lock_irqsave(&lp->lock, flags);
 
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
 
                /* clear interrupt masks */
                val = lp->a.read_csr(ioaddr, CSR3);
@@ -2586,14 +2586,14 @@ pcnet32_interrupt(int irq, void *dev_id)
                                       dev->name, csr0);
                        /* unlike for the lance, there is no restart needed */
                }
-               if (netif_rx_schedule_prep(dev, &lp->napi)) {
+               if (netif_rx_schedule_prep(&lp->napi)) {
                        u16 val;
                        /* set interrupt masks */
                        val = lp->a.read_csr(ioaddr, CSR3);
                        val |= 0x5f00;
                        lp->a.write_csr(ioaddr, CSR3, val);
                        mmiowb();
-                       __netif_rx_schedule(dev, &lp->napi);
+                       __netif_rx_schedule(&lp->napi);
                        break;
                }
                csr0 = lp->a.read_csr(ioaddr, CSR0);
index 6b7ed1a5b3b7bf8b34e8a0e61ec4a5eb34034029..33e8e62b450212f281cc9e6e641a73ea12d1e492 100644 (file)
@@ -2293,7 +2293,7 @@ static int ql_poll(struct napi_struct *napi, int budget)
 
        if (tx_cleaned + rx_cleaned != budget) {
                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-               __netif_rx_complete(ndev, napi);
+               __netif_rx_complete(napi);
                ql_update_small_bufq_prod_index(qdev);
                ql_update_lrg_bufq_prod_index(qdev);
                writel(qdev->rsp_consumer_index,
@@ -2352,8 +2352,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
                spin_unlock(&qdev->adapter_lock);
        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
                ql_disable_interrupts(qdev);
-               if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) {
-                       __netif_rx_schedule(ndev, &qdev->napi);
+               if (likely(netif_rx_schedule_prep(&qdev->napi))) {
+                       __netif_rx_schedule(&qdev->napi);
                }
        } else {
                return IRQ_NONE;
index 225930fda5af418ec324412f58302813c1bbe5c8..02147082786d5add5219b64fa66eb9aa7a69a55d 100644 (file)
@@ -1647,7 +1647,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
                rx_ring->cq_id);
 
        if (work_done < budget) {
-               __netif_rx_complete(qdev->ndev, napi);
+               __netif_rx_complete(napi);
                ql_enable_completion_interrupt(qdev, rx_ring->irq);
        }
        return work_done;
@@ -1733,7 +1733,7 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
        struct rx_ring *rx_ring = dev_id;
        struct ql_adapter *qdev = rx_ring->qdev;
-       netif_rx_schedule(qdev->ndev, &rx_ring->napi);
+       netif_rx_schedule(&rx_ring->napi);
        return IRQ_HANDLED;
 }
 
@@ -1819,8 +1819,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
                                                              &rx_ring->rx_work,
                                                              0);
                                else
-                                       netif_rx_schedule(qdev->ndev,
-                                                         &rx_ring->napi);
+                                       netif_rx_schedule(&rx_ring->napi);
                                work_done++;
                        }
                }
index aff1cc627c05b3069b90a9c8843b04cc9fad1d03..53bbddfc8c954ec17381230a4a491374f4a7404e 100644 (file)
@@ -667,7 +667,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
        work_done = r6040_rx(dev, budget);
 
        if (work_done < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                /* Enable RX interrupt */
                iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
        }
@@ -704,7 +704,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 
                /* Mask off RX interrupt */
                misr &= ~RX_INTS;
-               netif_rx_schedule(dev, &lp->napi);
+               netif_rx_schedule(&lp->napi);
        }
 
        /* TX interrupt request */
index dddf6aeff498ef1e4c88a101369481ccc21e4212..2c73ca606b35e0942e31a033791f550ed4db481c 100644 (file)
@@ -3581,8 +3581,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
                tp->intr_mask = ~tp->napi_event;
 
-               if (likely(netif_rx_schedule_prep(dev, &tp->napi)))
-                       __netif_rx_schedule(dev, &tp->napi);
+               if (likely(netif_rx_schedule_prep(&tp->napi)))
+                       __netif_rx_schedule(&tp->napi);
                else if (netif_msg_intr(tp)) {
                        printk(KERN_INFO "%s: interrupt %04x in poll\n",
                               dev->name, status);
@@ -3603,7 +3603,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
        rtl8169_tx_interrupt(dev, tp, ioaddr);
 
        if (work_done < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                tp->intr_mask = 0xffff;
                /*
                 * 20040426: the barrier is not strictly required but the
index 1b489df80fa675b88d9cc81de87b3e6f0458aee6..512861923c6bcfd82a76c7d6ec27e9d61619af0b 100644 (file)
@@ -2852,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
        s2io_chk_rx_buffers(nic, ring);
 
        if (pkts_processed < budget_org) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                /*Re Enable MSI-Rx Vector*/
                addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
                addr += 7 - ring->ring_no;
@@ -2890,7 +2890,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
                        break;
        }
        if (pkts_processed < budget_org) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                /* Re enable the Rx interrupts for the ring */
                writeq(0, &bar0->rx_traffic_mask);
                readl(&bar0->rx_traffic_mask);
@@ -4344,7 +4344,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
                val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
                writeb(val8, addr);
                val8 = readb(addr);
-               netif_rx_schedule(dev, &ring->napi);
+               netif_rx_schedule(&ring->napi);
        } else {
                rx_intr_handler(ring, 0);
                s2io_chk_rx_buffers(sp, ring);
@@ -4791,7 +4791,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 
                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
-                               netif_rx_schedule(dev, &sp->napi);
+                               netif_rx_schedule(&sp->napi);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
                                readl(&bar0->rx_traffic_int);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 480caec1e0248ecc163c990f9f5a3a1dcab415b5..31e38fae017f8def7273d568089914599a1b54d6 100644 (file)
@@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
                sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
 
        if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
-               if (netif_rx_schedule_prep(dev, &sc->napi)) {
+               if (netif_rx_schedule_prep(&sc->napi)) {
                        __raw_writeq(0, sc->sbm_imr);
-                       __netif_rx_schedule(dev, &sc->napi);
+                       __netif_rx_schedule(&sc->napi);
                        /* Depend on the exit from poll to reenable intr */
                }
                else {
@@ -2667,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
        sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
 
        if (work_done < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
 #ifdef CONFIG_SBMAC_COALESCE
                __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 086629c0fe57bcf9a0ea97f8630eb83eeea4babb..42934ba2030d9d08d8c1b0ec987c932de298b1c4 100644 (file)
@@ -230,7 +230,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
                 * since efx_channel_processed() will have no effect if
                 * interrupts have already been disabled.
                 */
-               netif_rx_complete(napi_dev, napi);
+               netif_rx_complete(napi);
                efx_channel_processed(channel);
        }
 
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index dd0d45b9e71f2d4a7dc76fc87f28844dd25ad815..0dd7a532c78a4b06a5c92209d763313570983556 100644 (file)
@@ -77,7 +77,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
                  channel->channel, raw_smp_processor_id());
        channel->work_pending = true;
 
-       netif_rx_schedule(channel->napi_dev, &channel->napi_str);
+       netif_rx_schedule(&channel->napi_str);
 }
 
 #endif /* EFX_EFX_H */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f73ee7974003fe293a39703d443c384d9c481f94..c9dbb06f8c9430cfb06779a8486544792972996a 100644 (file)
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
                unsigned long flags;
 
                spin_lock_irqsave(&hw->hw_lock, flags);
-               __netif_rx_complete(dev, napi);
+               __netif_rx_complete(napi);
                hw->intr_mask |= napimask[skge->port];
                skge_write32(hw, B0_IMSK, hw->intr_mask);
                skge_read32(hw, B0_IMSK);
@@ -3377,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
        if (status & (IS_XA1_F|IS_R1_F)) {
                struct skge_port *skge = netdev_priv(hw->dev[0]);
                hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
-               netif_rx_schedule(hw->dev[0], &skge->napi);
+               netif_rx_schedule(&skge->napi);
        }
 
        if (status & IS_PA_TO_TX1)
@@ -3397,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
 
                if (status & (IS_XA2_F|IS_R2_F)) {
                        hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
-                       netif_rx_schedule(hw->dev[1], &skge->napi);
+                       netif_rx_schedule(&skge->napi);
                }
 
                if (status & IS_PA_TO_RX2) {
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index fa28542b47d5b677f1a7d3fc03601bef07253d6b..ecdde03d4167984c1afc67865b973e26556abd82 100644 (file)
@@ -984,7 +984,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
                        /* We processed all packets available.  Tell NAPI it can
                         * stop polling then re-enable rx interrupts */
                        smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_);
-                       netif_rx_complete(dev, napi);
+                       netif_rx_complete(napi);
                        temp = smsc911x_reg_read(pdata, INT_EN);
                        temp |= INT_EN_RSFL_EN_;
                        smsc911x_reg_write(pdata, INT_EN, temp);
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 940220f6092135561ab222ac40270fd53e4dc61e..27e017d969667f474b4c647c23c1fffe42a19bbf 100644 (file)
@@ -666,7 +666,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
                        smsc9420_pci_flush_write(pd);
 
                        ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
-                       netif_rx_schedule(pd->dev, &pd->napi);
+                       netif_rx_schedule(&pd->napi);
                }
 
                if (ints_to_clear)
@@ -889,7 +889,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
        smsc9420_pci_flush_write(pd);
 
        if (work_done < budget) {
-               netif_rx_complete(dev, &pd->napi);
+               netif_rx_complete(&pd->napi);
 
                /* re-enable RX DMA interrupts */
                dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 325fbc9612c96b57e465e07255cda608ed445f40..c5c123d3af57f00b5930d7cf318f1a3444f5dcd5 100644 (file)
@@ -1302,7 +1302,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
        if (packets_done < budget) {
-               netif_rx_complete(netdev, napi);
+               netif_rx_complete(napi);
                spider_net_rx_irq_on(card);
                card->ignore_rx_ramfull = 0;
        }
@@ -1529,8 +1529,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
                        spider_net_refill_rx_chain(card);
                        spider_net_enable_rxdmac(card);
                        card->num_rx_ints ++;
-                       netif_rx_schedule(card->netdev,
-                                         &card->napi);
+                       netif_rx_schedule(&card->napi);
                }
                show_error = 0;
                break;
@@ -1550,8 +1549,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
                card->num_rx_ints ++;
-               netif_rx_schedule(card->netdev,
-                                 &card->napi);
+               netif_rx_schedule(&card->napi);
                show_error = 0;
                break;
 
@@ -1565,8 +1563,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
                card->num_rx_ints ++;
-               netif_rx_schedule(card->netdev,
-                                 &card->napi);
+               netif_rx_schedule(&card->napi);
                show_error = 0;
                break;
 
@@ -1660,11 +1657,11 @@ spider_net_interrupt(int irq, void *ptr)
 
        if (status_reg & SPIDER_NET_RXINT ) {
                spider_net_rx_irq_off(card);
-               netif_rx_schedule(netdev, &card->napi);
+               netif_rx_schedule(&card->napi);
                card->num_rx_ints ++;
        }
        if (status_reg & SPIDER_NET_TXINT)
-               netif_rx_schedule(netdev, &card->napi);
+               netif_rx_schedule(&card->napi);
 
        if (status_reg & SPIDER_NET_LINKINT)
                spider_net_link_reset(netdev);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 0358809f409c8d8522d983fd54966b54be78b5ef..d5b9dd842c619e9fbeef4a73f590cabbd3129ac2 100644 (file)
@@ -1290,8 +1290,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                if (intr_status & (IntrRxDone | IntrRxEmpty)) {
                        u32 enable;
 
-                       if (likely(netif_rx_schedule_prep(dev, &np->napi))) {
-                               __netif_rx_schedule(dev, &np->napi);
+                       if (likely(netif_rx_schedule_prep(&np->napi))) {
+                               __netif_rx_schedule(&np->napi);
                                enable = readl(ioaddr + IntrEnable);
                                enable &= ~(IntrRxDone | IntrRxEmpty);
                                writel(enable, ioaddr + IntrEnable);
@@ -1530,7 +1530,7 @@ static int netdev_poll(struct napi_struct *napi, int budget)
                intr_status = readl(ioaddr + IntrStatus);
        } while (intr_status & (IntrRxDone | IntrRxEmpty));
 
-       netif_rx_complete(dev, napi);
+       netif_rx_complete(napi);
        intr_status = readl(ioaddr + IntrEnable);
        intr_status |= IntrRxDone | IntrRxEmpty;
        writel(intr_status, ioaddr + IntrEnable);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index f4b0beec4d1960b4a0318d52a6164f312f711d9c..8a7460412482d5165f24f74c3a13a1c7d4df333a 100644 (file)
@@ -921,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
                gp->status = readl(gp->regs + GREG_STAT);
        } while (gp->status & GREG_STAT_NAPI);
 
-       __netif_rx_complete(dev, napi);
+       __netif_rx_complete(napi);
        gem_enable_ints(gp);
 
        spin_unlock_irqrestore(&gp->lock, flags);
@@ -944,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
 
        spin_lock_irqsave(&gp->lock, flags);
 
-       if (netif_rx_schedule_prep(dev, &gp->napi)) {
+       if (netif_rx_schedule_prep(&gp->napi)) {
                u32 gem_status = readl(gp->regs + GREG_STAT);
 
                if (gem_status == 0) {
@@ -954,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
                }
                gp->status = gem_status;
                gem_disable_ints(gp);
-               __netif_rx_schedule(dev, &gp->napi);
+               __netif_rx_schedule(&gp->napi);
        }
 
        spin_unlock_irqrestore(&gp->lock, flags);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 308f365270e9e6e7e904208011e5afdf82e91b3a..bcd0e60cbda98984c9383f55cc4df671510d33a1 100644 (file)
@@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
        if (!(dmactl & DMA_IntMask)) {
                /* disable interrupts */
                tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
-               if (netif_rx_schedule_prep(dev, &lp->napi))
-                       __netif_rx_schedule(dev, &lp->napi);
+               if (netif_rx_schedule_prep(&lp->napi))
+                       __netif_rx_schedule(&lp->napi);
                else {
                        printk(KERN_ERR "%s: interrupt taken in poll\n",
                               dev->name);
@@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
        spin_unlock(&lp->lock);
 
        if (received < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                /* enable interrupts */
                tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
        }
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 5b83fbb02013fa57d64a0e290ddba485e84f8a7f..a10a83a11d9fb07a02d10d2f8b1fad1f13c26a71 100644 (file)
@@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
                bdx_isr_extra(priv, isr);
 
        if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
-               if (likely(netif_rx_schedule_prep(ndev, &priv->napi))) {
-                       __netif_rx_schedule(ndev, &priv->napi);
+               if (likely(netif_rx_schedule_prep(&priv->napi))) {
+                       __netif_rx_schedule(&priv->napi);
                        RET(IRQ_HANDLED);
                } else {
                        /* NOTE: we get here if intr has slipped into window
@@ -289,7 +289,6 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
 static int bdx_poll(struct napi_struct *napi, int budget)
 {
        struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
-       struct net_device *dev = priv->ndev;
        int work_done;
 
        ENTER;
@@ -303,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
                 * device lock and allow waiting tasks (eg rmmod) to advance) */
                priv->napi_stop = 0;
 
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                bdx_enable_interrupts(priv);
        }
        return work_done;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7971d802508de11118b4f616f07f8dbdd65016f7..04ae1e86aeaa4c8b05ad6de192034f43e439420d 100644 (file)
@@ -4451,7 +4451,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
                        sblk->status &= ~SD_STATUS_UPDATED;
 
                if (likely(!tg3_has_work(tp))) {
-                       netif_rx_complete(tp->dev, napi);
+                       netif_rx_complete(napi);
                        tg3_restart_ints(tp);
                        break;
                }
@@ -4461,7 +4461,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 
 tx_recovery:
        /* work_done is guaranteed to be less than budget. */
-       netif_rx_complete(tp->dev, napi);
+       netif_rx_complete(napi);
        schedule_work(&tp->reset_task);
        return work_done;
 }
@@ -4510,7 +4510,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
 
        if (likely(!tg3_irq_sync(tp)))
-               netif_rx_schedule(dev, &tp->napi);
+               netif_rx_schedule(&tp->napi);
 
        return IRQ_HANDLED;
 }
@@ -4535,7 +4535,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
-               netif_rx_schedule(dev, &tp->napi);
+               netif_rx_schedule(&tp->napi);
 
        return IRQ_RETVAL(1);
 }
@@ -4577,7 +4577,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
-               netif_rx_schedule(dev, &tp->napi);
+               netif_rx_schedule(&tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
@@ -4623,7 +4623,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
-       if (netif_rx_schedule_prep(dev, &tp->napi)) {
+       if (netif_rx_schedule_prep(&tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
@@ -4631,7 +4631,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
-               __netif_rx_schedule(dev, &tp->napi);
+               __netif_rx_schedule(&tp->napi);
        }
 out:
        return IRQ_RETVAL(handled);
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 271bc230c8a9145ee9b24a302cc7a902d4216d62..75461dbd487657bd8e6a8002499db7c54fe2d774 100644 (file)
@@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
 
        if (num_received < budget) {
                data->rxpending = 0;
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
                TSI_WRITE(TSI108_EC_INTMASK,
                                     TSI_READ(TSI108_EC_INTMASK)
@@ -919,7 +919,7 @@ static void tsi108_rx_int(struct net_device *dev)
         * from tsi108_check_rxring().
         */
 
-       if (netif_rx_schedule_prep(dev, &data->napi)) {
+       if (netif_rx_schedule_prep(&data->napi)) {
                /* Mask, rather than ack, the receive interrupts.  The ack
                 * will happen in tsi108_poll().
                 */
@@ -930,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev)
                                     | TSI108_INT_RXTHRESH |
                                     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
                                     TSI108_INT_RXWAIT);
-               __netif_rx_schedule(dev, &data->napi);
+               __netif_rx_schedule(&data->napi);
        } else {
                if (!netif_running(dev)) {
                        /* This can happen if an interrupt occurs while the
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 739d610d18c5e6ab7958e52e545405a12b1babe3..6c3428a37c0b9d5c672e777b052a199710d01c97 100644 (file)
@@ -103,7 +103,7 @@ void oom_timer(unsigned long data)
 {
         struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
-       netif_rx_schedule(dev, &tp->napi);
+       netif_rx_schedule(&tp->napi);
 }
 
 int tulip_poll(struct napi_struct *napi, int budget)
@@ -300,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
 
          /* Remove us from polling list and enable RX intr. */
 
-         netif_rx_complete(dev, napi);
+         netif_rx_complete(napi);
          iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
 
          /* The last op happens after poll completion. Which means the following:
@@ -336,7 +336,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
           * before we did netif_rx_complete(). See? We would lose it. */
 
          /* remove ourselves from the polling list */
-         netif_rx_complete(dev, napi);
+         netif_rx_complete(napi);
 
          return work_done;
 }
@@ -519,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
                        rxd++;
                        /* Mask RX intrs and add the device to poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
-                       netif_rx_schedule(dev, &tp->napi);
+                       netif_rx_schedule(&tp->napi);
 
                        if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5386d9b73e6aa78798138e703c9fc520dcea0cbf..0009f4e34433ee9f9130ca2c1a8122df612f07b7 100644 (file)
@@ -1755,7 +1755,6 @@ static int
 typhoon_poll(struct napi_struct *napi, int budget)
 {
        struct typhoon *tp = container_of(napi, struct typhoon, napi);
-       struct net_device *dev = tp->dev;
        struct typhoon_indexes *indexes = tp->indexes;
        int work_done;
 
@@ -1784,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                iowrite32(TYPHOON_INTR_NONE,
                                tp->ioaddr + TYPHOON_REG_INTR_MASK);
                typhoon_post_pci_writes(tp->ioaddr);
@@ -1807,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance)
 
        iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
 
-       if (netif_rx_schedule_prep(dev, &tp->napi)) {
+       if (netif_rx_schedule_prep(&tp->napi)) {
                iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
                typhoon_post_pci_writes(ioaddr);
-               __netif_rx_schedule(dev, &tp->napi);
+               __netif_rx_schedule(&tp->napi);
        } else {
                printk(KERN_ERR "%s: Error, poll already scheduled\n",
                        dev->name);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 5c82f147f151bf1bec6870b955d7b0aa64891059..78a2ede19c5e3ebb8eba25e98ad3e654c616d6a8 100644 (file)
@@ -3330,7 +3330,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
                struct ucc_fast_private *uccf;
                u32 uccm;
 
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
                uccf = ugeth->uccf;
                uccm = in_be32(uccf->p_uccm);
                uccm |= UCCE_RX_EVENTS;
@@ -3364,10 +3364,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
 
        /* check for receive events that require processing */
        if (ucce & UCCE_RX_EVENTS) {
-               if (netif_rx_schedule_prep(dev, &ugeth->napi)) {
+               if (netif_rx_schedule_prep(&ugeth->napi)) {
                        uccm &= ~UCCE_RX_EVENTS;
                        out_be32(uccf->p_uccm, uccm);
-                       __netif_rx_schedule(dev, &ugeth->napi);
+                       __netif_rx_schedule(&ugeth->napi);
                }
        }
 
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 8d405c83df8b8b882e2df8294e3567d8083269ca..ac07cc6e3cb214eb7a56336205836e0b50bdfb1c 100644 (file)
@@ -589,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
        work_done = rhine_rx(dev, budget);
 
        if (work_done < budget) {
-               netif_rx_complete(dev, napi);
+               netif_rx_complete(napi);
 
                iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
                          IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1318,7 +1318,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
                                  IntrPCIErr | IntrStatsMax | IntrLinkChange,
                                  ioaddr + IntrEnable);
 
-                       netif_rx_schedule(dev, &rp->napi);
+                       netif_rx_schedule(&rp->napi);
                }
 
                if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 71ca29cc184d84e4c4fdd0cbbda8656c3da6801a..b7004ff36451764278e47eac267ffb4cd6bcd4bf 100644 (file)
@@ -374,9 +374,9 @@ static void skb_recv_done(struct virtqueue *rvq)
 {
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI, Suppress further interrupts if successful. */
-       if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
+       if (netif_rx_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
-               __netif_rx_schedule(vi->dev, &vi->napi);
+               __netif_rx_schedule(&vi->napi);
        }
 }
 
@@ -402,11 +402,11 @@ again:
 
        /* Out of packets? */
        if (received < budget) {
-               netif_rx_complete(vi->dev, napi);
+               netif_rx_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
-                       __netif_rx_schedule(vi->dev, napi);
+                       __netif_rx_schedule(napi);
                        goto again;
                }
        }
@@ -580,9 +580,9 @@ static int virtnet_open(struct net_device *dev)
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED */
-       if (netif_rx_schedule_prep(dev, &vi->napi)) {
+       if (netif_rx_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
-               __netif_rx_schedule(dev, &vi->napi);
+               __netif_rx_schedule(&vi->napi);
        }
        return 0;
 }
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 0bcc0b5f22d7ef06b9890a0eb9a53ef65ba7cce4..08b3536944fe034544c74dc2470d7745393b89aa 100644 (file)
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
                received = sca_rx_done(port, budget);
 
        if (received < budget) {
-               netif_rx_complete(port->netdev, napi);
+               netif_rx_complete(napi);
                enable_intr(port);
        }
 
@@ -359,7 +359,7 @@ static irqreturn_t sca_intr(int irq, void *dev_id)
                if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
                        handled = 1;
                        disable_intr(port);
-                       netif_rx_schedule(port->netdev, &port->napi);
+                       netif_rx_schedule(&port->napi);
                }
        }
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index fe376fde4e897ec7fd096fcc8f337e3405a75905..761635be9104f155af6978a72a8283ebec2cde9e 100644 (file)
@@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data)
 {
        struct net_device *dev = (struct net_device *)data;
        struct netfront_info *np = netdev_priv(dev);
-       netif_rx_schedule(dev, &np->napi);
+       netif_rx_schedule(&np->napi);
 }
 
 static int netfront_tx_slot_available(struct netfront_info *np)
@@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev)
                xennet_alloc_rx_buffers(dev);
                np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-                       netif_rx_schedule(dev, &np->napi);
+                       netif_rx_schedule(&np->napi);
        }
        spin_unlock_bh(&np->rx_lock);
 
@@ -979,7 +979,7 @@ err:
 
                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
                if (!more_to_do)
-                       __netif_rx_complete(dev, napi);
+                       __netif_rx_complete(napi);
 
                local_irq_restore(flags);
        }
@@ -1310,7 +1310,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
                xennet_tx_buf_gc(dev);
                /* Under tx_lock: protects access to rx shared-ring indexes. */
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-                       netif_rx_schedule(dev, &np->napi);
+                       netif_rx_schedule(&np->napi);
        }
 
        spin_unlock_irqrestore(&np->tx_lock, flags);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 58856b6737fb65087d88bfe15d2b7fe129262581..41e1224651cf98dacc97acf74d999a7c89233ec4 100644 (file)
@@ -1555,8 +1555,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 }
 
 /* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct net_device *dev,
-                                        struct napi_struct *napi)
+static inline int netif_rx_schedule_prep(struct napi_struct *napi)
 {
        return napi_schedule_prep(napi);
 }
@@ -1564,27 +1563,24 @@ static inline int netif_rx_schedule_prep(struct net_device *dev,
 /* Add interface to tail of rx poll list. This assumes that _prep has
  * already been called and returned 1.
  */
-static inline void __netif_rx_schedule(struct net_device *dev,
-                                      struct napi_struct *napi)
+static inline void __netif_rx_schedule(struct napi_struct *napi)
 {
        __napi_schedule(napi);
 }
 
 /* Try to reschedule poll. Called by irq handler. */
 
-static inline void netif_rx_schedule(struct net_device *dev,
-                                    struct napi_struct *napi)
+static inline void netif_rx_schedule(struct napi_struct *napi)
 {
-       if (netif_rx_schedule_prep(dev, napi))
-               __netif_rx_schedule(dev, napi);
+       if (netif_rx_schedule_prep(napi))
+               __netif_rx_schedule(napi);
 }
 
 /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().  */
-static inline int netif_rx_reschedule(struct net_device *dev,
-                                     struct napi_struct *napi)
+static inline int netif_rx_reschedule(struct napi_struct *napi)
 {
        if (napi_schedule_prep(napi)) {
-               __netif_rx_schedule(dev, napi);
+               __netif_rx_schedule(napi);
                return 1;
        }
        return 0;
@@ -1593,8 +1589,7 @@ static inline int netif_rx_reschedule(struct net_device *dev,
 /* same as netif_rx_complete, except that local_irq_save(flags)
  * has already been issued
  */
-static inline void __netif_rx_complete(struct net_device *dev,
-                                      struct napi_struct *napi)
+static inline void __netif_rx_complete(struct napi_struct *napi)
 {
        __napi_complete(napi);
 }
@@ -1604,8 +1599,7 @@ static inline void __netif_rx_complete(struct net_device *dev,
  * it completes the work. The device cannot be out of poll list at this
  * moment, it is BUG().
  */
-static inline void netif_rx_complete(struct net_device *dev,
-                                    struct napi_struct *napi)
+static inline void netif_rx_complete(struct napi_struct *napi)
 {
        napi_complete(napi);
 }
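The include/linux/netdevice.h hunk above defines the new single-argument forms of the NAPI helpers. Below is a minimal sketch of how a converted driver's interrupt and poll routines call them; the "foo" driver, its private struct, and its register handling are hypothetical stand-ins for the device-specific pieces seen in the drivers above (compare e.g. r6040_interrupt()/r6040_poll() or bdx_isr_napi()/bdx_poll()).

/*
 * Illustrative sketch only: "foo" is a hypothetical driver showing the
 * post-patch calling convention, where the NAPI scheduling helpers take
 * only the napi_struct and no net_device argument.  The napi_struct is
 * assumed to have been registered with netif_napi_add() at probe time.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* device-specific state (ring pointers, ioremapped registers, ...) */
};

/* Device-specific RX cleanup would live here; stubbed for the sketch. */
static int foo_rx(struct foo_priv *priv, int budget)
{
	return 0;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	/* Mask the device's RX interrupt here (device specific), then
	 * hand the remaining work to the poll routine. */
	if (likely(netif_rx_schedule_prep(&priv->napi)))
		__netif_rx_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done;

	work_done = foo_rx(priv, budget);

	if (work_done < budget) {
		/* All pending packets processed: leave the poll list, then
		 * re-enable the device's RX interrupt (device specific). */
		netif_rx_complete(napi);
	}

	return work_done;
}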