/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>

#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"
#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000
#define DMA_TX_SIZE	512
#define DMA_RX_SIZE	1024
#define TC_DEFAULT	64
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification 1 sec(1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER	1000

static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, S_IRUGO | S_IWUSR);
module_param(debug, int, S_IRUGO | S_IWUSR);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
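/* Both knobs are also writable at runtime through sysfs. A hypothetical
 * load-time example (the module name here is an assumption, not taken
 * from this file):
 *
 *	modprobe samsung-sxgbe debug=16 eee_timer=2000
 *
 * debug is a NETIF_MSG_* bitmask (-1 keeps default_msg_level above) and
 * eee_timer is the LPI entry timer in milliseconds.
 */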
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
/**
 * sxgbe_verify_args - verify the driver parameters.
 * Description: it verifies if some wrong parameter is passed to the driver.
 * Note that wrong parameters are replaced with the default values.
 */
static void sxgbe_verify_args(void)
{
	if (unlikely(eee_timer < 0))
		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}
static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
/**
 * sxgbe_eee_ctrl_timer
 * @arg: private device data
 * Description:
 * If there is no data transfer and if we are not in LPI state,
 * then the MAC transmitter can be moved to LPI state.
 */
static void sxgbe_eee_ctrl_timer(unsigned long arg)
{
	struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;

	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}
/**
 * sxgbe_eee_init
 * @priv: private device pointer
 * Description:
 * If EEE support has been enabled while configuring the driver, if the
 * MAC actually supports EEE (from the HW cap reg) and the PHY can also
 * manage EEE, then enable the LPI state and start the timer to verify
 * if the tx path can enter the LPI state.
 */
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->hw_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			return false;

		priv->eee_active = 1;
		init_timer(&priv->eee_ctrl_timer);
		priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
		priv->eee_ctrl_timer.data = (unsigned long)priv;
		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     SXGBE_DEFAULT_LPI_TIMER,
					     priv->tx_lpi_timer);

		pr_info("Energy-Efficient Ethernet initialized\n");

		ret = true;
	}

	return ret;
}
static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
	/* When EEE has already been initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}
/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 */
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdio communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
}
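/* For example, a CSR clock of 200 MHz falls in the 150-250 MHz band, so
 * the routine above selects SXGBE_CSR_150_250M as the MDC divider
 * setting. Rates below SXGBE_CSR_F_150M all collapse into the lowest
 * band, and rates above SXGBE_CSR_F_500M leave clk_csr untouched.
 */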
/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	(x->dma_tx_size / 4)

static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}
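/* sxgbe_tx_avail() is plain ring arithmetic: cur_tx is the producer index
 * and dirty_tx the consumer index, both monotonically increasing and taken
 * modulo the ring size on use. Worked example with tx_qsize = 512:
 *
 *	dirty_tx = 10, cur_tx = 500  ->  10 + 512 - 500 - 1 = 21 free slots
 *
 * The "- 1" keeps one descriptor unused so a full ring can never be
 * confused with an empty one.
 */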
/**
 * sxgbe_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void sxgbe_adjust_link(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	u8 new_state = 0;
	u8 speed = 0xff;

	if (!phydev)
		return;

	/* SXGBE does not support auto-negotiation and
	 * half duplex mode, so duplex changes are not handled
	 * in this function; only speed and link status are.
	 */
	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_10000:
				speed = SXGBE_SPEED_10G;
				break;
			case SPEED_2500:
				speed = SXGBE_SPEED_2_5G;
				break;
			case SPEED_1000:
				speed = SXGBE_SPEED_1G;
				break;
			default:
				netif_err(priv, link, dev,
					  "Speed (%d) not supported\n",
					  phydev->speed);
			}

			priv->speed = phydev->speed;
			priv->hw->mac->set_speed(priv->ioaddr, speed);
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* Alter the MAC settings for EEE */
	sxgbe_eee_adjust(priv);
}
/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	/* save phy device in private structure */
	priv->phydev = phydev;

	return 0;
}
/**
 * sxgbe_clear_descriptors: clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors
 * in case both basic and extended descriptors are used.
 */
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
	int i, j;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
		for (i = 0; i < rxsize; i++)
			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	}

	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
		for (i = 0; i < txsize; i++)
			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
	}
}
static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}
/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: device structure
 * @queue_no: queue number
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function initializes the DMA TX descriptor ring.
 */
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_zalloc_coherent(dev,
					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					      &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff array */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);
	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	/* initialise TX queue lock */
	spin_lock_init(&tx_ring->tx_lock);

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}
/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: device structure
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the DMA RX descriptors and buffer arrays.
 */
void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
		  int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}
/**
 * init_rx_ring - init the RX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @rx_ring: ring to be initialised
 * @rx_rsize: ring size
 * Description: this function initializes the DMA RX descriptor ring.
 */
static int init_rx_ring(struct net_device *dev, u8 queue_no,
			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int desc_index;
	unsigned int bfsize = 0;
	int ret = 0;

	/* Set the max buffer size according to the MTU. */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);

	/* RX ring is not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
		return -ENOMEM;
	}

	/* assign queue number */
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					      &rx_ring->dma_rx_phy, GFP_KERNEL);

	if (rx_ring->dma_rx == NULL)
		return -ENOMEM;

	/* allocate memory for RX skbuff array */
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
	if (rx_ring->rx_skbuff_dma == NULL) {
		ret = -ENOMEM;
		goto err_free_dma_rx;
	}

	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
	if (rx_ring->rx_skbuff == NULL) {
		ret = -ENOMEM;
		goto err_free_skbuff_dma;
	}

	/* initialise the buffers */
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;
		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
		if (ret)
			goto err_init_rx_buffers;
	}

	/* initialise counters */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
	priv->dma_buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (--desc_index >= 0)
		free_rx_ring(priv->device, rx_ring, desc_index);
	kfree(rx_ring->rx_skbuff);
err_free_skbuff_dma:
	kfree(rx_ring->rx_skbuff_dma);
err_free_dma_rx:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);

	return ret;
}
/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: device structure
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the DMA TX descriptors.
 */
void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
		  int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @netd: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *netd)
{
	int queue_num, ret;
	struct sxgbe_priv_data *priv = netdev_priv(netd);
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Allocate memory for queue structures and TX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
			goto txalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed during cleaning of the TX queue
		 */
		priv->txq[queue_num]->priv_ptr = priv;
	}

	/* Allocate memory for queue structures and RX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!!\n");
			goto rxalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed during cleaning of the RX queue
		 */
		priv->rxq[queue_num]->priv_ptr = priv;
	}

	sxgbe_clear_descriptors(priv);

	return 0;

txalloc_err:
	while (queue_num--)
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	return ret;

rxalloc_err:
	while (queue_num--)
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	return ret;
}
static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
	int dma_desc;
	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
	int tx_rsize = priv->dma_tx_size;

	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;

		if (txqueue->tx_skbuff_dma[dma_desc])
			dma_unmap_single(priv->device,
					 txqueue->tx_skbuff_dma[dma_desc],
					 priv->hw->desc->get_tx_len(tdesc),
					 DMA_TO_DEVICE);

		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
		txqueue->tx_skbuff[dma_desc] = NULL;
		txqueue->tx_skbuff_dma[dma_desc] = 0;
	}
}

static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
		tx_free_ring_skbufs(tqueue);
	}
}
static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
	int queue_num;
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Release the DMA TX buffers */
	dma_free_tx_skbufs(priv);

	/* Release the TX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	}

	/* Release the RX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	}
}
static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue),
						    GFP_KERNEL);
		if (!priv->txq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue),
						    GFP_KERNEL);
		if (!priv->rxq[queue_num])
			return -ENOMEM;
	}

	return 0;
}
/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
 * or Store-And-Forward capability.
 */
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}
/**
 * sxgbe_tx_queue_clean:
 * @tqueue: queue pointer
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	spin_lock(&tqueue->tx_lock);

	priv->xstats.tx_clean++;
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

	/* wake up the queue if it was stopped and room is available again */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_tx_queue_stopped(dev_txq) &&
		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_tx_wake_queue(dev_txq);
		}
		netif_tx_unlock(priv->dev);
	}

	spin_unlock(&tqueue->tx_lock);
}
/**
 * sxgbe_tx_all_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		sxgbe_tx_queue_clean(tqueue);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		sxgbe_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
	}
}
/**
 * sxgbe_restart_tx_queue: irq tx error mng function
 * @priv: driver private structure
 * @queue_num: queue number
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}
/**
 * sxgbe_reset_all_tx_queues: irq tx error mng function
 * @priv: driver private structure
 * Description: it cleans all the descriptors and
 * restarts the transmission on all queues in case of errors.
 */
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* On TX timeout of the net device, resetting all queues
	 * may not be the proper way; revisit this later if needed.
	 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		sxgbe_restart_tx_queue(priv, queue_num);
}
/**
 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional feature/functions.
 * This can be also used to override the value passed through the
 * platform and necessary for old MAC10/100 and GMAC chips.
 */
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read Capability Register CAP[0] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read Capability Register CAP[1] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read Capability Register CAP[2] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}
/**
 * sxgbe_check_ether_addr: check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid; in case of failures it
 * generates a random MAC address.
 */
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->ioaddr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	dev_info(priv->device, "device MAC address %pM\n",
		 priv->dev->dev_addr);
}
/**
 * sxgbe_init_dma_engine: DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific SXGBE callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is kept for the MAC or GMAC.
 */
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
	int queue_num;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		burst_map = priv->plat->dma_cfg->burst_map;
	}

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
					fixed_burst, pbl,
					(priv->txq[queue_num])->dma_tx_phy,
					(priv->rxq[queue_num])->dma_rx_phy,
					priv->dma_tx_size, priv->dma_rx_size);

	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}
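/* A platform can override the DMA defaults above through plat->dma_cfg.
 * A hypothetical board file might pass something like the following
 * (the struct tag below is an assumption for illustration, not taken
 * from this file):
 *
 *	static struct sxgbe_dma_cfg dma_cfg = {
 *		.pbl		= 8,
 *		.fixed_burst	= 1,
 *		.burst_map	= 0x7,
 *	};
 *
 * When plat->dma_cfg is NULL, pbl falls back to DEFAULT_DMA_PBL and
 * both fixed_burst and burst_map stay 0.
 */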
/**
 * sxgbe_init_mtl_engine: MTL init.
 * @priv: driver private structure
 * Description:
 * It inits the MTL invoking the specific SXGBE callback.
 */
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
						  priv->hw_cap.tx_mtl_qsize);
		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
	}
}

/**
 * sxgbe_disable_mtl_engine: MTL disable.
 * @priv: driver private structure
 * Description:
 * It disables the MTL queues by invoking the specific SXGBE callback.
 */
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}
/**
 * sxgbe_tx_timer: mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler that directly invokes sxgbe_tx_queue_clean.
 */
static void sxgbe_tx_timer(unsigned long data)
{
	struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;

	sxgbe_tx_queue_clean(p);
}

/**
 * sxgbe_tx_init_coalesce: init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		p->tx_coal_frames = SXGBE_TX_FRAMES;
		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
		init_timer(&p->txtimer);
		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		p->txtimer.data = (unsigned long)priv->txq[queue_num];
		p->txtimer.function = sxgbe_tx_timer;
		add_timer(&p->txtimer);
	}
}
static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		del_timer_sync(&p->txtimer);
	}
}
/**
 * sxgbe_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int sxgbe_open(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret, queue_num;

	clk_prepare_enable(priv->sxgbe_clk);

	sxgbe_check_ether_addr(priv);

	/* Init the phy */
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
			   __func__, ret);
		goto phy_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
	priv->tx_tc = TC_DEFAULT;
	priv->rx_tc = TC_DEFAULT;
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = sxgbe_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
		goto init_error;
	}

	/* MTL initialization */
	sxgbe_init_mtl_engine(priv);

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
	}

	/* Request the IRQ lines */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, priv->irq, ret);
		goto init_error;
	}

	/* If the LPI irq is different from the mac irq
	 * register a dedicated handler
	 */
	if (priv->lpi_irq != dev->irq) {
		ret = devm_request_irq(priv->device, priv->lpi_irq,
				       sxgbe_common_interrupt,
				       IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto init_error;
		}
	}

	/* Request TX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->txq[queue_num])->irq_no,
				       sxgbe_tx_interrupt, 0,
				       dev->name, priv->txq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
				   __func__, priv->irq, ret);
			goto init_error;
		}
	}

	/* Request RX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->rxq[queue_num])->irq_no,
				       sxgbe_rx_interrupt, 0,
				       dev->name, priv->rxq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
				   __func__, priv->irq, ret);
			goto init_error;
		}
	}

	/* Enable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, true);
	priv->hw->mac->enable_rx(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	sxgbe_mtl_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));

	priv->xstats.tx_threshold = priv->tx_tc;
	priv->xstats.rx_threshold = priv->rx_tc;

	/* Start the ball rolling... */
	netdev_dbg(dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	if (priv->phydev)
		phy_start(priv->phydev);

	/* initialise TX coalesce parameters */
	sxgbe_tx_init_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
	}

	priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
	priv->eee_enabled = sxgbe_eee_init(priv);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

init_error:
	free_dma_desc_resources(priv);
	phy_disconnect(priv->phydev);
phy_error:
	clk_disable_unprepare(priv->sxgbe_clk);

	return ret;
}
/**
 * sxgbe_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int sxgbe_release(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_tx_stop_all_queues(dev);

	napi_disable(&priv->napi);

	/* delete TX timers */
	sxgbe_tx_del_timer(priv);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	/* disable MTL queue */
	sxgbe_disable_mtl_engine(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	clk_disable_unprepare(priv->sxgbe_clk);

	return 0;
}
/* Prepare first Tx descriptor for doing TSO operation */
void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
		       struct sxgbe_tx_norm_desc *first_desc,
		       struct sk_buff *skb)
{
	unsigned int total_hdr_len, tcp_hdr_len;

	/* Write first Tx descriptor with appropriate value */
	tcp_hdr_len = tcp_hdrlen(skb);
	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;

	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
					    total_hdr_len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);

	first_desc->tdes23.tx_rd_des23.first_desc = 1;
	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
					   tcp_hdr_len,
					   skb->len - total_hdr_len);
}
/**
 * sxgbe_xmit: Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and the SG feature.
 */
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	unsigned txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int tx_rsize = priv->dma_tx_size;
	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int no_pagedlen = skb_headlen(skb);
	u16 cur_mss = skb_shinfo(skb)->gso_size;
	u32 ctxt_desc_req = 0;

	/* get the TX queue handle */
	dev_txq = netdev_get_tx_queue(dev, txq_index);

	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
		ctxt_desc_req = 1;

	if (unlikely(vlan_tx_tag_present(skb) ||
		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		      tqueue->hwts_tx_en)))
		ctxt_desc_req = 1;

	/* get the spinlock */
	spin_lock(&tqueue->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		sxgbe_disable_eee_mode(priv);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
				   __func__, txq_index);
		}
		/* release the spin lock in case of BUSY */
		spin_unlock(&tqueue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tqueue->cur_tx % tx_rsize;
	tx_desc = tqueue->dma_tx + entry;

	first_desc = tx_desc;
	if (ctxt_desc_req)
		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;

	/* save the skb address */
	tqueue->tx_skbuff[entry] = skb;

	if (likely(skb_is_gso(skb))) {
		/* TSO support */
		if (unlikely(tqueue->prev_mss != cur_mss)) {
			priv->hw->desc->tx_ctxt_desc_set_mss(ctxt_desc,
							     cur_mss);
			priv->hw->desc->tx_ctxt_desc_set_tcmssv(ctxt_desc);
			priv->hw->desc->tx_ctxt_desc_reset_ostc(ctxt_desc);
			priv->hw->desc->tx_ctxt_desc_set_ctxt(ctxt_desc);
			priv->hw->desc->tx_ctxt_desc_set_owner(ctxt_desc);

			entry = (++tqueue->cur_tx) % tx_rsize;
			first_desc = tqueue->dma_tx + entry;

			tqueue->prev_mss = cur_mss;
		}
		sxgbe_tso_prepare(priv, first_desc, skb);
	} else {
		tx_desc->tdes01 = dma_map_single(priv->device,
						 skb->data, no_pagedlen,
						 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, tx_desc->tdes01))
			netdev_err(dev, "%s: TX dma mapping failed!!\n",
				   __func__);

		priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
						no_pagedlen, cksum_flag);
	}

	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
		int len = skb_frag_size(frag);

		entry = (++tqueue->cur_tx) % tx_rsize;
		tx_desc = tqueue->dma_tx + entry;
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);

		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
		tqueue->tx_skbuff[entry] = NULL;

		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		wmb();

		/* set the owner */
		priv->hw->desc->set_tx_owner(tx_desc);
	}

	/* close the descriptors */
	priv->hw->desc->close_tx_desc(tx_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->tx_count_frames += nr_frags + 1;
	if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
		priv->hw->desc->clear_tx_ic(tx_desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&tqueue->txtimer,
			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
	} else {
		tqueue->tx_count_frames = 0;
	}
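	/* The coalescing block above: once more than tx_coal_frames frames
	 * have been queued since the last completion interrupt, the IC
	 * (interrupt on completion) bit is cleared on this descriptor and
	 * the per-queue txtimer is (re)armed so sxgbe_tx_queue_clean() still
	 * reclaims descriptors even with the interrupt suppressed; otherwise
	 * the frame counter is reset and this descriptor raises a completion
	 * interrupt as usual.
	 */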
	/* set owner for first desc */
	priv->hw->desc->set_tx_owner(first_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->cur_tx++;

	/* display current ring */
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
		  __func__, tqueue->cur_tx % tx_rsize,
		  tqueue->dirty_tx % tx_rsize, entry,
		  first_desc, nr_frags);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     tqueue->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->tx_enable_tstamp(first_desc);
	}

	if (!tqueue->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);

	spin_unlock(&tqueue->tx_lock);

	return NETDEV_TX_OK;
}
/**
 * sxgbe_rx_refill: refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	u8 qnum = priv->cur_rx_qnum;

	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
	     priv->rxq[qnum]->dirty_rx++) {
		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
		struct sxgbe_rx_norm_desc *p;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rxq[qnum]->rx_skbuff[entry] = skb;
			priv->rxq[qnum]->rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize,
					       DMA_FROM_DEVICE);

			p->rdes23.rx_rd_des23.buf2_addr =
				priv->rxq[qnum]->rx_skbuff_dma[entry];
		}

		/* Added memory barrier for RX descriptor modification */
		wmb();
		priv->hw->desc->set_rx_owner(p);
		priv->hw->desc->set_rx_int_on_com(p);
		/* Added memory barrier for RX descriptor modification */
		wmb();
	}
}
/**
 * sxgbe_rx: receive the frames from the remote host
 * @priv: driver private structure
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
{
	u8 qnum = priv->cur_rx_qnum;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int next_entry = 0;
	unsigned int count = 0;
	int checksum;
	int status;

	while (count < limit) {
		struct sxgbe_rx_norm_desc *p;
		struct sk_buff *skb;
		int frame_len;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);

		/* Read the status of the incoming frame and also get checksum
		 * value based on whether it is enabled in SXGBE hardware or
		 * not.
		 */
		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
						     &checksum);
		if (unlikely(status < 0)) {
			entry = next_entry;
			continue;
		}
		if (unlikely(!priv->rxcsum_insertion))
			checksum = CHECKSUM_NONE;

		skb = priv->rxq[qnum]->rx_skbuff[entry];

		if (unlikely(!skb))
			netdev_err(priv->dev, "rx descriptor is not consistent\n");

		prefetch(skb->data - NET_IP_ALIGN);
		priv->rxq[qnum]->rx_skbuff[entry] = NULL;

		frame_len = priv->hw->desc->get_rx_frame_len(p);

		skb_put(skb, frame_len);

		skb->ip_summed = checksum;
		if (checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);

		entry = next_entry;
	}

	sxgbe_rx_refill(priv);

	return count;
}
/**
 * sxgbe_poll - sxgbe poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int sxgbe_poll(struct napi_struct *napi, int budget)
{
	struct sxgbe_priv_data *priv = container_of(napi,
						    struct sxgbe_priv_data, napi);
	int work_done = 0;
	u8 qnum = priv->cur_rx_qnum;

	priv->xstats.napi_poll++;
	/* first, clean the tx queues */
	sxgbe_tx_all_clean(priv);

	work_done = sxgbe_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
	}

	return work_done;
}
/**
 * sxgbe_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void sxgbe_tx_timeout(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	sxgbe_reset_all_tx_queues(priv);
}
/**
 * sxgbe_common_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
 * interrupts.
 */
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *)dev_id;
	struct sxgbe_priv_data *priv = netdev_priv(netdev);
	int status;

	status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
	/* For LPI we need to save the tx status */
	if (status & TX_ENTRY_LPI_MODE) {
		priv->xstats.tx_lpi_entry_n++;
		priv->tx_path_in_lpi_mode = true;
	}
	if (status & TX_EXIT_LPI_MODE) {
		priv->xstats.tx_lpi_exit_n++;
		priv->tx_path_in_lpi_mode = false;
	}
	if (status & RX_ENTRY_LPI_MODE)
		priv->xstats.rx_lpi_entry_n++;
	if (status & RX_EXIT_LPI_MODE)
		priv->xstats.rx_lpi_exit_n++;

	return IRQ_HANDLED;
}
/**
 * sxgbe_tx_interrupt - TX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the per-queue TX structure pointer.
 * Description: this is the tx dma interrupt service routine.
 */
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
	struct sxgbe_priv_data *priv = txq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
						  &priv->xstats);
	/* check for normal path */
	if (likely((status & handle_tx)))
		napi_schedule(&priv->napi);

	/* check for unrecoverable error */
	if (unlikely((status & tx_hard_error)))
		sxgbe_restart_tx_queue(priv, txq->queue_no);

	/* check for TC configuration change */
	if (unlikely((status & tx_bump_tc) &&
		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->tx_tc < 512))) {
		/* step of TX TC is 32 till 128, otherwise 64 */
		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
					       txq->queue_no, priv->tx_tc);
		priv->xstats.tx_threshold = priv->tx_tc;
	}

	return IRQ_HANDLED;
}
/**
 * sxgbe_rx_interrupt - RX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the per-queue RX structure pointer.
 * Description: this is the rx dma interrupt service routine.
 */
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
	struct sxgbe_priv_data *priv = rxq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
						  &priv->xstats);

	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
		__napi_schedule(&priv->napi);
	}

	/* check for TC configuration change */
	if (unlikely((status & rx_bump_tc) &&
		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->rx_tc < 128))) {
		/* step of RX TC is 32 */
		priv->rx_tc += 32;
		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
					       rxq->queue_no, priv->rx_tc);
		priv->xstats.rx_threshold = priv->rx_tc;
	}

	return IRQ_HANDLED;
}
static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
{
	u64 val = readl(ioaddr + reg_lo);

	val |= ((u64)readl(ioaddr + reg_hi)) << 32;

	return val;
}
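/* The MMC counters are 64 bits wide but only 32 bits can be read per
 * access, so sxgbe_get_stat64() composes the value from a low and a high
 * register; e.g. lo = 0x00000010 and hi = 0x00000002 yield
 * (0x2ULL << 32) | 0x10 = 0x200000010. There is no lo/hi/lo re-read
 * here: the caller is expected to freeze the counters first, as
 * sxgbe_get_stats64() below does via SXGBE_MMC_CTRL_CNT_FRZ.
 */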
/* sxgbe_get_stats64 - entry point to see statistical information of device
 * @dev : device pointer.
 * @stats : pointer to hold all the statistical information of device.
 * Description:
 * This function is a driver entry point whenever ifconfig command gets
 * executed to see device statistics. Statistics are number of
 * bytes sent or received, errors occurred etc.
 * Return value:
 * This function returns various statistical information of device.
 */
static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *stats)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->ioaddr;
	u64 count;

	spin_lock(&priv->stats_lock);
	/* Freeze the counter registers before reading value otherwise it may
	 * get updated by hardware while we are reading them
	 */
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);

	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
					   SXGBE_MMC_RXOCTETHI_GCNT_REG);

	stats->rx_packets = sxgbe_get_stat64(ioaddr,
					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);

	stats->multicast = sxgbe_get_stat64(ioaddr,
					    SXGBE_MMC_RXMULTILO_GCNT_REG,
					    SXGBE_MMC_RXMULTIHI_GCNT_REG);

	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
						SXGBE_MMC_RXCRCERRLO_REG,
						SXGBE_MMC_RXCRCERRHI_REG);

	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXLENERRLO_REG,
						   SXGBE_MMC_RXLENERRHI_REG);

	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);

	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
					   SXGBE_MMC_TXOCTETHI_GCNT_REG);

	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);

	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
	stats->tx_errors = count - stats->tx_errors;
	stats->tx_packets = count;
	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);

	return stats;
}
/* sxgbe_set_features - entry point to set offload features of the device.
 * @dev : device pointer.
 * @features : features which are required to be set.
 * Description:
 * This function is a driver entry point and called by Linux kernel whenever
 * any device features are set or reset by user.
 * Return value:
 * This function returns 0 after setting or resetting device features.
 */
static int sxgbe_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM) {
			priv->hw->mac->enable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = true;
		} else {
			priv->hw->mac->disable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = false;
		}
	}

	return 0;
}
/* sxgbe_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
	/* RFC 791, page 25, "Every internet module must be able to forward
	 * a datagram of 68 octets without further fragmentation."
	 */
	if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
		netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
			   MIN_MTU, MAX_MTU);
		return -EINVAL;
	}

	/* Return if the buffer sizes will not change */
	if (dev->mtu == new_mtu)
		return 0;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		return 0;

	/* Receive ring buffer size needs to be set based on the MTU. If the
	 * MTU changed, then reinitialisation of the receive ring buffers is
	 * needed. Hence bring the interface down and bring it back up.
	 */
	sxgbe_release(dev);
	return sxgbe_open(dev);
}
static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
				unsigned int reg_n)
{
	unsigned long data;

	data = (addr[5] << 8) | addr[4];
	/* For MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
}
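/* Byte-order example: for MAC address 00:11:22:33:44:55 (addr[0] = 0x00
 * ... addr[5] = 0x55) the high register gets 0x5544 (plus the AE bit)
 * and the low register gets 0x33221100, i.e. the address is stored
 * little-endian across the two registers.
 */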
/**
 * sxgbe_set_rx_mode - entry point for setting different receive mode of
 * a device. unicast, multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever different receive mode like unicast, multicast and promiscuous
 * must be enabled/disabled.
 * Return value:
 * void.
 */
static void sxgbe_set_rx_mode(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
	unsigned int value = 0;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;
	int reg = 1;

	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		value = SXGBE_FRAME_FILTER_PR;

	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
		   (dev->flags & IFF_ALLMULTI)) {
		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);

	} else if (!netdev_mc_empty(dev)) {
		/* Hash filter for multicast */
		value = SXGBE_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) {
		/* Switch to promiscuous mode if more than 16 addrs
		 * are required
		 */
		value |= SXGBE_FRAME_FILTER_PR;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= SXGBE_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + SXGBE_FRAME_FILTER);

	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
		   readl(ioaddr + SXGBE_FRAME_FILTER),
		   readl(ioaddr + SXGBE_HASH_HIGH),
		   readl(ioaddr + SXGBE_HASH_LOW));
}
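/* Worked example for the multicast hash in sxgbe_set_rx_mode() above:
 * the bit-reversed, inverted CRC-32 of the 6-byte address is shifted
 * right by 26, leaving a 6-bit index 0..63. For an index of 42 (0b101010),
 * 42 >> 5 = 1 selects mc_filter[1] (the HASH_HIGH register) and
 * 42 & 31 = 10 selects bit 10 within that register.
 */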
/**
 * sxgbe_config - entry point for changing configuration mode passed on by
 * ifconfig
 * @dev : pointer to the device structure
 * @map : pointer to the device mapping structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever some device configuration is changed.
 * Return value:
 * This function returns 0 if success and appropriate error otherwise.
 */
static int sxgbe_config(struct net_device *dev, struct ifmap *map)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	/* Can't act on a running interface */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != (unsigned long)priv->ioaddr) {
		netdev_warn(dev, "can't change I/O address\n");
		return -EOPNOTSUPP;
	}

	/* Don't allow changing the IRQ */
	if (map->irq != priv->irq) {
		netdev_warn(dev, "can't change IRQ number %d\n", priv->irq);
		return -EOPNOTSUPP;
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * sxgbe_poll_controller - entry point for polling receive by device
 * @dev : pointer to the device structure
 * Description:
 * This function is used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 * Return value:
 * Void.
 */
static void sxgbe_poll_controller(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	disable_irq(priv->irq);
	sxgbe_rx_interrupt(priv->irq, dev);
	enable_irq(priv->irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/* sxgbe_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static const struct net_device_ops sxgbe_netdev_ops = {
	.ndo_open		= sxgbe_open,
	.ndo_start_xmit		= sxgbe_xmit,
	.ndo_stop		= sxgbe_release,
	.ndo_get_stats64	= sxgbe_get_stats64,
	.ndo_change_mtu		= sxgbe_change_mtu,
	.ndo_set_features	= sxgbe_set_features,
	.ndo_set_rx_mode	= sxgbe_set_rx_mode,
	.ndo_tx_timeout		= sxgbe_tx_timeout,
	.ndo_do_ioctl		= sxgbe_ioctl,
	.ndo_set_config		= sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sxgbe_poll_controller,
#endif
	.ndo_set_mac_address	= eth_mac_addr,
};
/* Get the hardware ops */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
	ops_ptr->mac		= sxgbe_get_core_ops();
	ops_ptr->desc		= sxgbe_get_desc_ops();
	ops_ptr->dma		= sxgbe_get_dma_ops();
	ops_ptr->mtl		= sxgbe_get_mtl_ops();

	/* set the MDIO communication Address/Data registers */
	ops_ptr->mii.addr	= SXGBE_MDIO_SCMD_ADD_REG;
	ops_ptr->mii.data	= SXGBE_MDIO_SCMD_DATA_REG;

	/* Assigning the default link settings:
	 * SXGBE defines no default values to be set in registers,
	 * so assign 0 for port and duplex
	 */
	ops_ptr->link.port	= 0;
	ops_ptr->link.duplex	= 0;
	ops_ptr->link.speed	= SXGBE_SPEED_10G;
}
/**
 * sxgbe_hw_init - Init the GMAC device
 * @priv: driver private structure
 * Description: this function checks the HW capability
 * (if supported) and sets the driver's features.
 */
static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
{
	u32 ctrl_ids;

	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
	if (!priv->hw)
		return -ENOMEM;

	/* get the hardware ops */
	sxgbe_get_ops(priv->hw);

	/* get the controller id */
	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
		priv->hw->ctrl_uid, priv->hw->ctrl_id);

	/* get the H/W features */
	if (!sxgbe_get_hw_features(priv))
		pr_info("Hardware features not found\n");

	if (priv->hw_cap.tx_csum_offload)
		pr_info("TX Checksum offload supported\n");

	if (priv->hw_cap.rx_csum_offload)
		pr_info("RX Checksum offload supported\n");

	return 0;
}
static int sxgbe_sw_reset(void __iomem *addr)
{
	int retry_count = 10;

	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	return 0;
}
/**
 * sxgbe_drv_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 */
struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
					struct sxgbe_plat_data *plat_dat,
					void __iomem *addr)
{
	struct sxgbe_priv_data *priv;
	struct net_device *ndev;
	int ret;
	u8 queue_num;

	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	sxgbe_set_ethtool_ops(ndev);
	priv->plat = plat_dat;
	priv->ioaddr = addr;

	ret = sxgbe_sw_reset(priv->ioaddr);
	if (ret)
		goto error_free_netdev;

	/* Verify driver arguments */
	sxgbe_verify_args();

	/* Init MAC and get the capabilities */
	ret = sxgbe_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	/* allocate memory resources for Descriptor rings */
	ret = txring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ret = rxring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ndev->netdev_ops = &sxgbe_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GRO;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);

	/* assign filtering support */
	ndev->priv_flags |= IFF_UNICAST_FLT;

	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Enable TCP segmentation offload for all DMA channels */
	if (priv->hw_cap.tcpseg_offload) {
		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
		}
	}

	/* Enable Rx checksum offload */
	if (priv->hw_cap.rx_csum_offload) {
		priv->hw->mac->enable_rx_csum(priv->ioaddr);
		priv->rxcsum_insertion = true;
	}

	/* Initialise pause frame settings */
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	/* Rx Watchdog is available, enable depend on platform data */
	if (!priv->plat->riwt_off) {
		priv->use_riwt = 1;
		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);

	spin_lock_init(&priv->stats_lock);

	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
	if (IS_ERR(priv->sxgbe_clk)) {
		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
			    __func__);
		goto error_napi_del;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Vice versa the driver will
	 * try to set the MDC clock dynamically according to the actual
	 * csr clock input.
	 */
	if (!priv->plat->clk_csr)
		sxgbe_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = sxgbe_mdio_register(ndev);
	if (ret < 0) {
		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
			   __func__, priv->plat->bus_id);
		goto error_clk_put;
	}

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_mdio_unregister;
	}

	sxgbe_check_ether_addr(priv);

	return priv;

error_mdio_unregister:
	sxgbe_mdio_unregister(ndev);
error_clk_put:
	clk_put(priv->sxgbe_clk);
error_napi_del:
	netif_napi_del(&priv->napi);
error_free_hw:
	kfree(priv->hw);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}
/**
 * sxgbe_drv_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status, releases the DMA descriptor rings.
 */
int sxgbe_drv_remove(struct net_device *ndev)
{
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	u8 queue_num;

	netdev_info(ndev, "%s: removing driver\n", __func__);

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
	}

	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);

	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	unregister_netdev(ndev);

	sxgbe_mdio_unregister(ndev);

	clk_put(priv->sxgbe_clk);

	netif_napi_del(&priv->napi);

	kfree(priv->hw);

	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
int sxgbe_suspend(struct net_device *ndev)
{
	return 0;
}

int sxgbe_resume(struct net_device *ndev)
{
	return 0;
}

int sxgbe_freeze(struct net_device *ndev)
{
	return -ENOSYS;
}

int sxgbe_restore(struct net_device *ndev)
{
	return -ENOSYS;
}
#endif /* CONFIG_PM */
/* Driver is configured as Platform driver */
static int __init sxgbe_init(void)
{
	int ret;

	ret = sxgbe_register_platform();
	if (ret)
		goto err;
	return 0;
err:
	pr_err("driver registration failed\n");
	return ret;
}

static void __exit sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}

module_init(sxgbe_init);
module_exit(sxgbe_exit);
static int __init sxgbe_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("sxgbeeth=", sxgbe_cmdline_opt);
MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");

MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");

MODULE_LICENSE("GPL");