/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
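/* A minimal sketch of the ring walk described above (illustration only,
 * not driver code): advance through the descriptors until an empty one is
 * found, wrapping back to the base at the descriptor whose WRAP bit is set.
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		... pass the filled skb up the stack, then refill ...
 *		bdp = (bdp->status & RXBD_WRAP) ? rx_queue->rx_bd_base
 *						: bdp + 1;
 *	}
 */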
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
static int gfar_poll_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
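/* Note: the rxbd8/txbd8 descriptors pack a 16-bit status word and a 16-bit
 * length into the single 32-bit lstatus field; BD_LFLAG() shifts flag bits
 * into the status half, so the assignment above also zeroes the length.
 */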
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}
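	/* The single coherent allocation above is carved up in order; the
	 * resulting layout (illustrative) is:
	 *
	 *   [ tx ring 0 | ... | tx ring N-1 | rx ring 0 | ... | rx ring M-1 ]
	 *   ^ vaddr/addr as returned        ^ rx rings start where tx ends
	 */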
	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM) {
		rctrl |= RCTRL_CHECKSUMMING;
		priv->uses_rxfcb = 1;
	}

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en) {
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
		priv->uses_rxfcb = 1;
	}

	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 1;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	priv->num_grps++;

	return 0;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);
	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}
	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_PADDING |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_gfar_dev(priv);
	return err;
}
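/* For reference, a minimal device-tree node carrying the properties parsed
 * above (hypothetical example; addresses, interrupt specifiers and the phy
 * label are placeholders):
 *
 *	ethernet@24000 {
 *		compatible = "gianfar";
 *		model = "eTSEC";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 04 9f 00 00 01 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *		fsl,num_tx_queues = <1>;
 *		fsl,num_rx_queues = <1>;
 *	};
 */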
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
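/* Worked example: reverse_bitmap(0x80, 8) returns 0x01 -- the MSB of the
 * 8-bit map (queue 0 by convention) becomes bit 0, the first bit that
 * for_each_set_bit() visits.
 */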
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
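/* Resulting table layout (from the code above): index MAX_FILER_IDX holds
 * the default match-all rule, the four-entry clusters for the six protocol
 * classes programmed above sit immediately below it, and every index below
 * cur_filer_idx is a no-match placeholder.
 */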
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}
static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = 0;
	if (!priv->pause_aneg_en && priv->tx_pause_en)
		tempval |= MACCFG1_TX_FLOW;
	if (!priv->pause_aneg_en && priv->rx_pause_en)
		tempval |= MACCFG1_RX_FLOW;
	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;
	/* Register for napi ...We are registering NAPI for each grp */
	if (priv->mode == SQ_SG_MODE)
		netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
			       GFAR_DEV_WEIGHT);
	else
		for (i = 0; i < priv->num_grps; i++)
			netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
				       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers
	 */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map =
			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map =
			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}
	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);
	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;
register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);

	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}
static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, MII mode off (for SerDes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if the device has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
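/* The 0x7f807f80 mask keeps bits 7-14 of each 16-bit half of the register,
 * so the halfword comparison above is precisely the "bits 7-14 equal
 * bits 23-30" idle test that the A002 erratum calls for.
 */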
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing_all(priv);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
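/* Worked example (assuming a plain Ethernet II frame and an 8-byte FCB):
 * after gfar_add_fcb() the network header sits at offset 8 + 14 = 22, so
 * l3os = 22 - 8 = 14 -- the IP header starts 14 bytes into the frame
 * proper -- and for IPv4 without options l4os is 20.
 */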
static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}
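/* Example: within any 32-byte window, FCB addresses ending in 0x19-0x1f
 * satisfy (fcb_addr % 0x20) > 0x18 and trip the erratum; offsets
 * 0x00-0x18 are safe.
 */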
/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		consume_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;
	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   frag_len,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);
2254 /* Setup tx hardware time stamping if requested */
2255 if (unlikely(do_tstamp)) {
2256 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up.
	 * The first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_len);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}
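	/* Illustrative BD layout for a timestamped frame, per the comment
	 * above (the TxPAL sits between the FCB and the frame data in the
	 * skb head, but only GMAC_FCB_LEN bytes are described by the
	 * first BD):
	 *
	 *   txbdp_start:  bufPtr -> | FCB |        len = GMAC_FCB_LEN
	 *   txbdp_tstamp: bufPtr -> | frame data | len = headlen - fcb_len
	 */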
	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow:
	 * once we've got the needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the
	 * first BD to be transmitted.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);
	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio(); /* order BD and buffer writes before setting READY */

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
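	/* The ring indices rely on the ring size being a power of two:
	 * TX_RING_MOD_MASK() is just (size - 1), so e.g. with a 256-entry
	 * ring, index 255 + 1 wraps to 0 via the & 0xff mask (illustrative
	 * note; the mask definition lives in gianfar.h).
	 */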
	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If any feature still requires the parser, keep it enabled;
	 * otherwise disable it
	 */
	if (tempval & RCTRL_REQ_PARSER) {
		tempval |= RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 1;
	} else {
		tempval &= ~RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 0;
	}
	gfar_write(&regs->rctrl, tempval);
}
/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_CTAG_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
		priv->uses_rxfcb = 1;
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;
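	/* Illustrative arithmetic, assuming INCREMENTAL_BUFFER_SIZE is 512:
	 * for MTU 1500 with an FCB in use, frame_size = 1500 + 14 + 8 = 1522,
	 * so tempsize = (1522 & ~511) + 512 = 1024 + 512 = 1536 bytes per
	 * rx buffer.
	 */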
	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something
	 */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
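/* Illustrative arithmetic for gfar_align_skb(), assuming RXBUF_ALIGNMENT
 * is 64: if skb->data ends in 0x...30, we reserve 64 - 0x30 = 16 bytes so
 * the buffer starts on the next 64-byte boundary; if it is already
 * aligned, a full 64 bytes are reserved, which is why gfar_alloc_skb()
 * over-allocates by RXBUF_ALIGNMENT.
 */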
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;
	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else {
			buflen = bdp->length;
		}

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}
	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;
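	/* Report completed work to Byte Queue Limits. This pairs with the
	 * netdev_tx_sent_queue() call in gfar_start_xmit(); both sides
	 * account GFAR_CB(skb)->bytes_sent, so the sent and completed byte
	 * counts stay balanced.
	 */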
	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}
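/* gfar_schedule_cleanup() implements the usual NAPI handshake: if a poll
 * isn't already scheduled, mask RX/TX interrupts (IMASK_RTX_DISABLED) and
 * defer the work to the poll routine, which re-enables them via
 * IMASK_DEFAULT once it completes. If a poll is already pending, just ack
 * IEVENT so the interrupt line is deasserted.
 */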
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(priv->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, leave the checksum
	 * state unset and let the stack verify it.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
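/* Note: RXFCB_CSUM_MASK covers both the "checksummed" and the "checksum
 * error" FCB bits, so the equality test above only passes when the IP
 * header and TCP/UDP checksums were both verified by hardware and neither
 * error bit is set.
 */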
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We do need to check for NETIF_F_HW_VLAN_CTAG_RX here:
	 * even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
	struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_write(&regs->txic, 0);
		if (likely(tx_queue->txcoalescing))
			gfar_write(&regs->txic, tx_queue->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(rx_queue->rxcoalescing))
			gfar_write(&regs->rxic, rx_queue->rxic);
	}

	return work_done;
}
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	int has_tx_work = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;
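	/* Illustrative arithmetic: with a NAPI budget of 64 and three RXFn
	 * bits set in RSTAT, each active queue gets a budget of
	 * 64 / 3 = 21 frames for this poll round (integer division; the
	 * remainder is simply left unused this round).
	 */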

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues && !has_tx_work) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
	}

	return work_done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case SPEED_100:
			case SPEED_10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits of the
 *    hash index indicate which gaddr register to use, and the other
 *    5 bits indicate which bit (assuming an IBM numbering scheme,
 *    which for PowerPC (tm) is usually the case) in that register
 *    holds the entry.
 */
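/* Worked example (illustrative, for hash_width == 8): if the reversed
 * CRC is 0xD4xxxxxx, its top 8 bits are 0xD4 = 0b11010100. The top
 * 3 bits (0b110 = 6) select hash_regs[6], and the next 5 bits
 * (0b10100 = 20) select bit 20 in IBM order, i.e. host bit
 * (31 - 20) = 11, so the function below ORs in (1 << 11).
 */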
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	/* two bytes of zero padding, so the second word read below
	 * stays within the buffer
	 */
	char tmpbuf[ETH_ALEN + 2] = { 0 };
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);