struct fec_ptp_private *ptp_priv;
uint ptimer_present;
+
+ struct napi_struct napi;
+ int napi_weight;
+ bool use_napi;
};
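+/* Default NAPI poll budget; 64 is the conventional per-poll weight
+ * for Ethernet drivers and bounds the RX work done in one poll call.
+ */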
+#define FEC_NAPI_WEIGHT 64
+#ifdef CONFIG_FEC_NAPI
+#define FEC_NAPI_ENABLE true
+#else
+#define FEC_NAPI_ENABLE false
+#endif
+
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
+static int fec_rx_poll(struct napi_struct *napi, int budget);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
netif_wake_queue(ndev);
}
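+/* Mask or unmask the receive-frame (RXF) interrupt in the FEC interrupt
+ * mask register. The NAPI path masks it while a poll is pending and
+ * unmasks it once the RX ring has been drained.
+ */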
+static void
+fec_rx_int_enable(struct net_device *ndev, bool enabled)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+
+ int_events = readl(fep->hwp + FEC_IMASK);
+ if (enabled)
+ int_events |= FEC_ENET_RXF;
+ else
+ int_events &= ~FEC_ENET_RXF;
+ writel(int_events, fep->hwp + FEC_IMASK);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
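+/* Polled receive path for netconsole and similar users: invoke the
+ * normal interrupt handler with the device IRQ temporarily disabled.
+ */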
+static void fec_enet_netpoll(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ fec_enet_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
static void
fec_enet_tx(struct net_device *ndev)
{
spin_unlock(&fep->hw_lock);
}
+/* NAPI receive poll handler */
+static int fec_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct fec_enet_private *fep =
+ container_of(napi, struct fec_enet_private, napi);
+ struct net_device *ndev = napi->dev;
+ struct fec_ptp_private *fpp = fep->ptp_priv;
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int pkt_received = 0;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+ ushort pkt_len;
+ __u8 *data;
+
+ if (fep->use_napi)
+ WARN_ON(!budget);
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
+
+ /* First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
+ /* Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((status & BD_ENET_RX_LAST) == 0)
+ dev_err(&ndev->dev, "FEC ENET: rcv is not +last\n");
+
+ if (!fep->opened)
+ goto rx_processing_done;
+
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ ndev->stats.rx_errors++;
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ }
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
+ ndev->stats.rx_frame_errors++;
+ if (status & BD_ENET_RX_CR) /* CRC Error */
+ ndev->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ }
+
+ /* Report late collisions as a frame error.
+ * On this error, the BD is closed, but we don't know what we
+ * have in the buffer. So, just drop this frame on the floor.
+ */
+ if (status & BD_ENET_RX_CL) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = bdp->cbd_datlen;
+ ndev->stats.rx_bytes += pkt_len;
+ data = (__u8 *)__va(bdp->cbd_bufaddr);
+
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&ndev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, pkt_len);
+
+ /* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+
+ if (unlikely(!skb)) {
+ dev_err(&ndev->dev,
+ "%s: Memory squeeze, dropping packet.\n", ndev->name);
+ ndev->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len - 4); /* Make room */
+ skb_copy_to_linear_data(skb, data, pkt_len - 4);
+ /* 1588 message timestamp handling */
+ if (fep->ptimer_present)
+ fec_ptp_store_rxstamp(fpp, skb, bdp);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&ndev->dev, data,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
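+ /* Enhanced buffer descriptors: re-arm the per-descriptor RX
+ * interrupt and clear the protocol and BDU words for the next
+ * receive into this descriptor.
+ */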
+#ifdef CONFIG_ENHANCED_BD
+ bdp->cbd_esc = BD_ENET_RX_INT;
+ bdp->cbd_prot = 0;
+ bdp->cbd_bdu = 0;
+#endif
+
+ /* Update BD pointer to next entry */
+ if (status & BD_ENET_RX_WRAP)
+ bdp = fep->rx_bd_base;
+ else
+ bdp++;
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ }
+ fep->cur_rx = bdp;
+
+ if (pkt_received < budget) {
+ napi_complete(napi);
+ fec_rx_int_enable(ndev, true);
+ }
+
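+ /* Returning less than the full budget tells the NAPI core this
+ * queue is finished (napi_complete() was called above); returning
+ * the full budget keeps the poll scheduled.
+ */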
+ return pkt_received;
+}
/* During a receive, the cur_rx points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
flush_cache_all();
#endif
- spin_lock(&fep->hw_lock);
-
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
}
fep->cur_rx = bdp;
-
- spin_unlock(&fep->hw_lock);
}
static irqreturn_t
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_ptp_private *fpp = fep->ptp_priv;
uint int_events;
+ ulong flags;
irqreturn_t ret = IRQ_NONE;
do {
if (int_events & FEC_ENET_RXF) {
ret = IRQ_HANDLED;
- fec_enet_rx(ndev);
+ spin_lock_irqsave(&fep->hw_lock, flags);
+
+ if (fep->use_napi) {
+ /* Mask further RX interrupts and defer the receive
+ * work to the NAPI poll handler.
+ */
+ if (napi_schedule_prep(&fep->napi)) {
+ fec_rx_int_enable(ndev, false);
+ __napi_schedule(&fep->napi);
+ }
+ } else {
+ fec_enet_rx(ndev);
+ }
+
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
}
/* Transmit OK, or non-fatal error. Update the buffer
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
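+ /* NAPI must be enabled before an RX interrupt can schedule a poll */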
+ if (fep->use_napi)
+ napi_enable(&fep->napi);
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_do_ioctl = fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fec_enet_netpoll,
+#endif
};
/*
ndev->netdev_ops = &fec_netdev_ops;
ndev->ethtool_ops = &fec_enet_ethtool_ops;
+ fep->use_napi = FEC_NAPI_ENABLE;
+ fep->napi_weight = FEC_NAPI_WEIGHT;
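+ /* Register the poll handler with the RX interrupt masked;
+ * fec_rx_poll() unmasks it again each time a poll completes.
+ */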
+ if (fep->use_napi) {
+ fec_rx_int_enable(ndev, false);
+ netif_napi_add(ndev, &fep->napi, fec_rx_poll, fep->napi_weight);
+ }
+
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {