/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"
static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;
#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif
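/* Usage note (added commentary, not part of the original source): the
 * module_param() calls above are compiled in only when
 * CONFIG_AMD_XGBE_HAVE_ECC is set, so the ECC windows are tunable only on
 * such builds. A hypothetical invocation would look like:
 *
 *   modprobe amd-xgbe ecc_sec_period=300 ecc_ded_threshold=4
 *
 * which shortens the corrected-error window to five minutes and stops the
 * device only after more than four detected (uncorrected) errors land in a
 * single ecc_ded_period window.
 */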
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}
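/* Commentary (not in the original source): ring->cur and ring->dirty are
 * free-running unsigned counters that are only reduced modulo the ring size
 * when a descriptor is actually indexed (see XGBE_GET_DESC_DATA). Unsigned
 * subtraction therefore stays correct across wraparound. For example, with
 * a 512-entry ring, cur = 0x00000002 and dirty = 0xfffffffe give
 * cur - dirty = 4 in-flight descriptors, so 508 remain available.
 */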
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
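/* Worked example (added commentary): for the default 1500-byte MTU,
 * rx_buf_size = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
 * = 1522 bytes, which the final statement rounds up to the next
 * XGBE_RX_BUF_ALIGN boundary; assuming the 64-byte alignment defined in
 * xgbe.h, that yields a 1536-byte receive buffer.
 */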
static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_enable_rx_tx_int(pdata, channel);
}
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_disable_rx_tx_int(pdata, channel);
}
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}
static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}
static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	return IRQ_HANDLED;
}
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(irq, pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr(irq, pdata);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(irq, pdata);

	return IRQ_HANDLED;
}
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}
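/* Commentary (not in the original source): this is the standard NAPI
 * pattern. The handler does no packet work itself: it masks further Tx/Rx
 * interrupts for the channel (either at the DMA channel level or by
 * disabling the per-channel IRQ line) and defers processing to
 * xgbe_one_poll() in softirq context. The matching re-enable happens in
 * xgbe_one_poll() once a poll consumes less than its budget.
 */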
static void xgbe_tx_timer(unsigned long data)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)data;
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}
static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	setup_timer(&pdata->service_timer, xgbe_service_timer,
		    (unsigned long)pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xgbe_tx_timer,
			    (unsigned long)channel);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	DBGPR("<--xgbe_get_all_hw_features\n");
}
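/* Worked example (added commentary): the fifo sizes are encoded by the
 * hardware as log2(size) - 7, so an encoded value n yields 1 << (n + 7)
 * bytes. For instance, an encoded rx_fifo_size of 9 translates to
 * 1 << 16 = 65536 bytes of Rx fifo.
 */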
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
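/* Commentary (not in the original source): the unwind loop above relies on
 * unsigned wraparound instead of a signed index. After freeing channel 0,
 * i-- wraps from 0 to UINT_MAX, which fails the i < pdata->channel_count
 * test and terminates the loop without needing a separate signed counter.
 */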
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_start\n");

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}
static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}
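/* Worked example (added commentary): a TSO skb carrying 65160 bytes of TCP
 * payload with a 1448-byte MSS and a 54-byte header (14 Ethernet + 20 IP +
 * 20 TCP) is cut into 65160 / 1448 = 45 segments, so tx_packets = 45 and
 * tx_bytes grows by 44 * 54 = 2376 bytes to account for the headers the
 * hardware replicates onto every segment after the first.
 */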
static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
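/* Worked example (added commentary): a 9000-byte linear TSO skb with a new
 * MSS and a new VLAN tag needs one context descriptor (shared by TSO and
 * VLAN), one TSO header descriptor, and ceil(9000 / XGBE_TX_MAX_BUF_SIZE)
 * payload descriptors; assuming the 16320-byte per-buffer limit defined in
 * xgbe.h, that is a single payload descriptor, for rdesc_count = 3.
 */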
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		return ret;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
	xgbe_init_timers(pdata);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

	return ret;
}
static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_close\n");

	return 0;
}
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}
static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}
static void xgbe_tx_timeout(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			 struct tc_to_netdev *tc_to_netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	u8 tc;

	if (tc_to_netdev->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = tc_to_netdev->mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}
static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}

static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}
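/* Commentary (not in the original source): these two helpers split a
 * received frame between the header buffer (buf1) and the data buffer
 * (buf2). With split header active, buf1 holds exactly the hardware-reported
 * hdr_len; otherwise buf1 is either completely full (more descriptors
 * follow) or holds min(dma_len, rx.len) for a single-descriptor frame, and
 * buf2 carries whatever of rx.len remains beyond the running len.
 */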
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}
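/* Commentary (not in the original source): smp_rmb() orders the snapshot of
 * ring->cur against the descriptor reads that follow, pairing with the
 * producer-side barrier in the xmit path, while dma_rmb() inside the loop
 * keeps the descriptor field reads from being speculated ahead of the
 * OWN-bit check. Completion work is also capped at XGBE_TX_DESC_MAX_PROC
 * descriptors per call so one channel cannot monopolize a NAPI poll.
 */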
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
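/* Worked example (added commentary): with the usual NAPI budget of 64
 * (NAPI_POLL_WEIGHT) and four Rx rings, ring_budget = 16, so each pass
 * offers every channel up to 16 packets. The do/while keeps cycling through
 * the channels until either the full budget is consumed or a pass completes
 * with no new packets (processed == last_processed).
 */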
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}