/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

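/* Corrected (single-bit) ECC errors are counted within a time window;
 * crossing the informational or warning threshold logs a one-time
 * warning, while detected (uncorrectable) errors over their threshold
 * stop the device.  When ECC is supported, the defaults below can be
 * overridden at module load time, e.g. (assuming the module is named
 * amd-xgbe):
 *
 *   modprobe amd-xgbe ecc_ded_threshold=4 ecc_ded_period=300
 */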
static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_info_threshold,
                 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
                 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

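/* Allocate one channel structure per DMA channel, backed by a single
 * array of max(tx, rx) entries, and attach a Tx and/or Rx ring to each
 * channel that has one.
 */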
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel_mem, *channel;
        struct xgbe_ring *tx_ring, *rx_ring;
        unsigned int count, i;
        int ret = -ENOMEM;

        count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

        channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
        if (!channel_mem)
                goto err_channel;

        tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
                          GFP_KERNEL);
        if (!tx_ring)
                goto err_tx_ring;

        rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
                          GFP_KERNEL);
        if (!rx_ring)
                goto err_rx_ring;

        for (i = 0, channel = channel_mem; i < count; i++, channel++) {
                snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
                channel->pdata = pdata;
                channel->queue_index = i;
                channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                                    (DMA_CH_INC * i);

                if (pdata->per_channel_irq)
                        channel->dma_irq = pdata->channel_irq[i];

                if (i < pdata->tx_ring_count) {
                        spin_lock_init(&tx_ring->lock);
                        channel->tx_ring = tx_ring++;
                }

                if (i < pdata->rx_ring_count) {
                        spin_lock_init(&rx_ring->lock);
                        channel->rx_ring = rx_ring++;
                }

                netif_dbg(pdata, drv, pdata->netdev,
                          "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
                          channel->name, channel->dma_regs, channel->dma_irq,
                          channel->tx_ring, channel->rx_ring);
        }

        pdata->channel = channel_mem;
        pdata->channel_count = count;

        return 0;

err_rx_ring:
        kfree(tx_ring);

err_tx_ring:
        kfree(channel_mem);

err_channel:
        return ret;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
        if (!pdata->channel)
                return;

        kfree(pdata->channel->rx_ring);
        kfree(pdata->channel->tx_ring);
        kfree(pdata->channel);

        pdata->channel = NULL;
        pdata->channel_count = 0;
}

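/* Ring occupancy is tracked with free-running cur/dirty indices; the
 * unsigned subtractions below remain correct across index wraparound.
 */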
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
        return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
        return (ring->cur - ring->dirty);
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                                    struct xgbe_ring *ring, unsigned int count)
{
        struct xgbe_prv_data *pdata = channel->pdata;

        if (count > xgbe_tx_avail_desc(ring)) {
                netif_info(pdata, drv, pdata->netdev,
                           "Tx queue stopped, not enough descriptors available\n");
                netif_stop_subqueue(pdata->netdev, channel->queue_index);
                ring->tx.queue_stopped = 1;

                /* If we haven't notified the hardware because of xmit_more
                 * support, tell it now
                 */
                if (ring->tx.xmit_more)
                        pdata->hw_if.tx_start_xmit(channel, ring);

                return NETDEV_TX_BUSY;
        }

        return 0;
}

static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
        unsigned int rx_buf_size;

        rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

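        /* Round the buffer size up to a multiple of XGBE_RX_BUF_ALIGN */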
        rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
                      ~(XGBE_RX_BUF_ALIGN - 1);

        return rx_buf_size;
}

static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
                                  struct xgbe_channel *channel)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        enum xgbe_int int_id;

        if (channel->tx_ring && channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
        else if (channel->tx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI;
        else if (channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_RI;
        else
                return;

        hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++)
                xgbe_enable_rx_tx_int(pdata, channel);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
                                   struct xgbe_channel *channel)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        enum xgbe_int int_id;

        if (channel->tx_ring && channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
        else if (channel->tx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_TI;
        else if (channel->rx_ring)
                int_id = XGMAC_INT_DMA_CH_SR_RI;
        else
                return;

        hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++)
                xgbe_disable_rx_tx_int(pdata, channel);
}

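/* Track corrected (SEC) ECC error counts within the configured time
 * window; returning true tells the caller the warning threshold was
 * crossed and SEC reporting should be disabled for that area.
 */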
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
                         unsigned int *count, const char *area)
{
        if (time_before(jiffies, *period)) {
                (*count)++;
        } else {
                *period = jiffies + (ecc_sec_period * HZ);
                *count = 1;
        }

        if (*count > ecc_sec_info_threshold)
                dev_warn_once(pdata->dev,
                              "%s ECC corrected errors exceed informational threshold\n",
                              area);

        if (*count > ecc_sec_warn_threshold) {
                dev_warn_once(pdata->dev,
                              "%s ECC corrected errors exceed warning threshold\n",
                              area);
                return true;
        }

        return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
                         unsigned int *count, const char *area)
{
        if (time_before(jiffies, *period)) {
                (*count)++;
        } else {
                *period = jiffies + (ecc_ded_period * HZ);
                *count = 1;
        }

        if (*count > ecc_ded_threshold) {
                netdev_alert(pdata->netdev,
                             "%s ECC detected errors exceed threshold\n",
                             area);
                return true;
        }

        return false;
}

static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
        struct xgbe_prv_data *pdata = data;
        unsigned int ecc_isr;
        bool stop = false;

        /* Mask status with only the interrupts we care about */
        ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
        ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
        netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
                stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
                                     &pdata->tx_ded_count, "TX fifo");
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
                stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
                                     &pdata->rx_ded_count, "RX fifo");
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
                stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
                                     &pdata->desc_ded_count,
                                     "descriptor cache");
        }

        if (stop) {
                pdata->hw_if.disable_ecc_ded(pdata);
                schedule_work(&pdata->stopdev_work);
                goto out;
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
                if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
                                 &pdata->tx_sec_count, "TX fifo"))
                        pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
        }

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
                if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
                                 &pdata->rx_sec_count, "RX fifo"))
                        pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

        if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
                if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
                                 &pdata->desc_sec_count, "descriptor cache"))
                        pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
        /* Clear all ECC interrupts */
        XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

        return IRQ_HANDLED;
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
        struct xgbe_prv_data *pdata = data;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        unsigned int dma_isr, dma_ch_isr;
        unsigned int mac_isr, mac_tssr, mac_mdioisr;
        unsigned int i;

        /* The DMA interrupt status register also reports MAC and MTL
         * interrupts. So for polling mode, we just need to check for
         * this register to be non-zero
         */
        dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
        if (!dma_isr)
                goto isr_done;

        netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
                        continue;

                channel = pdata->channel + i;

                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
                          i, dma_ch_isr);

                /* The TI or RI interrupt bits may still be set even if using
                 * per channel DMA interrupts. Check to be sure those are not
                 * enabled before using the private data napi structure.
                 */
                if (!pdata->per_channel_irq &&
                    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
                     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
                        if (napi_schedule_prep(&pdata->napi)) {
                                /* Disable Tx and Rx interrupts */
                                xgbe_disable_rx_tx_ints(pdata);

                                /* Turn on polling */
                                __napi_schedule_irqoff(&pdata->napi);
                        }
                } else {
                        /* Don't clear Rx/Tx status if doing per channel DMA
                         * interrupts, these will be cleared by the ISR for
                         * per channel DMA interrupts.
                         */
                        XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
                        XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
                }

                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
                        pdata->ext_stats.rx_buffer_unavailable++;

                /* Restart the device on a Fatal Bus Error */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
                        schedule_work(&pdata->restart_work);

                /* Clear interrupt signals */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
        }

        if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
                mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

                netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
                          mac_isr);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
                        hw_if->tx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
                        hw_if->rx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
                        mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

                        netif_dbg(pdata, intr, pdata->netdev,
                                  "MAC_TSSR=%#010x\n", mac_tssr);

                        if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
                                /* Read Tx Timestamp to clear interrupt */
                                pdata->tx_tstamp =
                                        hw_if->get_tx_tstamp(pdata);
                                queue_work(pdata->dev_workqueue,
                                           &pdata->tx_tstamp_work);
                        }
                }

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
                        mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

                        netif_dbg(pdata, intr, pdata->netdev,
                                  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

                        if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
                                           SNGLCOMPINT))
                                complete(&pdata->mdio_complete);
                }
        }

isr_done:
        /* If there is not a separate AN irq, handle it here */
        if (pdata->dev_irq == pdata->an_irq)
                pdata->phy_if.an_isr(irq, pdata);

        /* If there is not a separate ECC irq, handle it here */
        if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
                xgbe_ecc_isr(irq, pdata);

        /* If there is not a separate I2C irq, handle it here */
        if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
                pdata->i2c_if.i2c_isr(irq, pdata);

        return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
        struct xgbe_channel *channel = data;
        struct xgbe_prv_data *pdata = channel->pdata;
        unsigned int dma_status;

        /* Per channel DMA interrupts are enabled, so we use the per
         * channel napi structure and not the private data napi structure
         */
        if (napi_schedule_prep(&channel->napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->channel_irq_mode)
                        xgbe_disable_rx_tx_int(pdata, channel);
                else
                        disable_irq_nosync(channel->dma_irq);

                /* Turn on polling */
                __napi_schedule_irqoff(&channel->napi);
        }

        /* Clear Tx/Rx signals */
        dma_status = 0;
        XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
        XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

        return IRQ_HANDLED;
}

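/* Tx activity timer: armed by the transmit path when completion
 * interrupts are held back by coalescing, so pending Tx work is still
 * handed to NAPI promptly.
 */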
static void xgbe_tx_timer(unsigned long data)
{
        struct xgbe_channel *channel = (struct xgbe_channel *)data;
        struct xgbe_prv_data *pdata = channel->pdata;
        struct napi_struct *napi;

        DBGPR("-->xgbe_tx_timer\n");

        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq) {
                        if (pdata->channel_irq_mode)
                                xgbe_disable_rx_tx_int(pdata, channel);
                        else
                                disable_irq_nosync(channel->dma_irq);
                } else {
                        xgbe_disable_rx_tx_ints(pdata);
                }

                /* Turn on polling */
                __napi_schedule(napi);
        }

        channel->tx_timer_active = 0;

        DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   service_work);

        pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(unsigned long data)
{
        struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

        queue_work(pdata->dev_workqueue, &pdata->service_work);

        mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        setup_timer(&pdata->service_timer, xgbe_service_timer,
                    (unsigned long)pdata);

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                setup_timer(&channel->tx_timer, xgbe_tx_timer,
                            (unsigned long)channel);
        }
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
        mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        del_timer_sync(&pdata->service_timer);

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                del_timer_sync(&channel->tx_timer);
        }
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
        unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
        struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

        DBGPR("-->xgbe_get_all_hw_features\n");

        mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

        memset(hw_feat, 0, sizeof(*hw_feat));

        hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

        /* Hardware feature register 0 */
        hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
        hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
        hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
        hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
        hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
        hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
        hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
        hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
        hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
        hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
        hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
                                              ADDMACADRSEL);
        hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
        hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

        /* Hardware feature register 1 */
        hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
        hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
        hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
        hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  L3L4FNUM);

        /* Hardware feature register 2 */
        hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
        hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
        hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
        hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
        hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
                break;
        case 1:
                hw_feat->hash_table_size = 64;
                break;
        case 2:
                hw_feat->hash_table_size = 128;
                break;
        case 3:
                hw_feat->hash_table_size = 256;
                break;
        }

        /* Translate the address width setting into actual number */
        switch (hw_feat->dma_width) {
        case 0:
                hw_feat->dma_width = 32;
                break;
        case 1:
                hw_feat->dma_width = 40;
                break;
        case 2:
                hw_feat->dma_width = 48;
                break;
        default:
                hw_feat->dma_width = 32;
        }

        /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
        hw_feat->tc_cnt++;

        /* Translate the fifo sizes into actual numbers */
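        /* (an encoded value of n corresponds to 2^(n + 7) bytes) */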
        hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
        hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

        DBGPR("<--xgbe_get_all_hw_features\n");
}

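/* The add/del flags allow NAPI to be enabled/disabled across power
 * management cycles without re-registering it: xgbe_start()/xgbe_stop()
 * pass 1, while xgbe_powerup()/xgbe_powerdown() pass 0.
 */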
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
        struct xgbe_channel *channel;
        unsigned int i;

        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        if (add)
                                netif_napi_add(pdata->netdev, &channel->napi,
                                               xgbe_one_poll, NAPI_POLL_WEIGHT);

                        napi_enable(&channel->napi);
                }
        } else {
                if (add)
                        netif_napi_add(pdata->netdev, &pdata->napi,
                                       xgbe_all_poll, NAPI_POLL_WEIGHT);

                napi_enable(&pdata->napi);
        }
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
        struct xgbe_channel *channel;
        unsigned int i;

        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        napi_disable(&channel->napi);

                        if (del)
                                netif_napi_del(&channel->napi);
                }
        } else {
                napi_disable(&pdata->napi);

                if (del)
                        netif_napi_del(&pdata->napi);
        }
}

static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        unsigned int i;
        int ret;

        ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
                               netdev->name, pdata);
        if (ret) {
                netdev_alert(netdev, "error requesting irq %d\n",
                             pdata->dev_irq);
                return ret;
        }

        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
                ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
                                       0, pdata->ecc_name, pdata);
                if (ret) {
                        netdev_alert(netdev, "error requesting ecc irq %d\n",
                                     pdata->ecc_irq);
                        goto err_dev_irq;
                }
        }

        if (!pdata->per_channel_irq)
                return 0;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                snprintf(channel->dma_irq_name,
                         sizeof(channel->dma_irq_name) - 1,
                         "%s-TxRx-%u", netdev_name(netdev),
                         channel->queue_index);

                ret = devm_request_irq(pdata->dev, channel->dma_irq,
                                       xgbe_dma_isr, 0,
                                       channel->dma_irq_name, channel);
                if (ret) {
                        netdev_alert(netdev, "error requesting irq %d\n",
                                     channel->dma_irq);
                        goto err_dma_irq;
                }
        }

        return 0;

err_dma_irq:
        /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
        for (i--, channel--; i < pdata->channel_count; i--, channel--)
                devm_free_irq(pdata->dev, channel->dma_irq, channel);

        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
                devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

        return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
                devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

        if (!pdata->per_channel_irq)
                return;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++)
                devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_tx_coalesce\n");

        pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
        pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

        hw_if->config_tx_coalesce(pdata);

        DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_rx_coalesce\n");

        pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
        pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
        pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

        hw_if->config_rx_coalesce(pdata);

        DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_tx_data\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->tx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_rx_data\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->rx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;

        return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerdown\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered down\n");
                DBGPR("<--xgbe_powerdown\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&pdata->lock, flags);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);

        netif_tx_stop_all_queues(netdev);

        xgbe_stop_timers(pdata);
        flush_workqueue(pdata->dev_workqueue);

        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);

        xgbe_napi_disable(pdata, 0);

        pdata->power_down = 1;

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerdown\n");

        return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerup\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered up\n");
                DBGPR("<--xgbe_powerup\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&pdata->lock, flags);

        pdata->power_down = 0;

        xgbe_napi_enable(pdata, 0);

        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);

        netif_tx_start_all_queues(netdev);

        xgbe_start_timers(pdata);

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerup\n");

        return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct net_device *netdev = pdata->netdev;
        int ret;

        DBGPR("-->xgbe_start\n");

        ret = hw_if->init(pdata);
        if (ret)
                return ret;

        xgbe_napi_enable(pdata, 1);

        ret = xgbe_request_irqs(pdata);
        if (ret)
                goto err_napi;

        ret = phy_if->phy_start(pdata);
        if (ret)
                goto err_irqs;

        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);

        netif_tx_start_all_queues(netdev);

        xgbe_start_timers(pdata);
        queue_work(pdata->dev_workqueue, &pdata->service_work);

        clear_bit(XGBE_STOPPED, &pdata->dev_state);

        DBGPR("<--xgbe_start\n");

        return 0;

err_irqs:
        xgbe_free_irqs(pdata);

err_napi:
        xgbe_napi_disable(pdata, 1);

        hw_if->exit(pdata);

        return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
        unsigned int i;

        DBGPR("-->xgbe_stop\n");

        if (test_bit(XGBE_STOPPED, &pdata->dev_state))
                return;

        netif_tx_stop_all_queues(netdev);

        xgbe_stop_timers(pdata);
        flush_workqueue(pdata->dev_workqueue);

        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);

        phy_if->phy_stop(pdata);

        xgbe_free_irqs(pdata);

        xgbe_napi_disable(pdata, 1);

        hw_if->exit(pdata);

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        continue;

                txq = netdev_get_tx_queue(netdev, channel->queue_index);
                netdev_tx_reset_queue(txq);
        }

        set_bit(XGBE_STOPPED, &pdata->dev_state);

        DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   stopdev_work);

        rtnl_lock();

        xgbe_stop(pdata);

        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);

        rtnl_unlock();

        netdev_alert(pdata->netdev, "device stopped\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
        DBGPR("-->xgbe_restart_dev\n");

        /* If not running, "restart" will happen on open */
        if (!netif_running(pdata->netdev))
                return;

        xgbe_stop(pdata);

        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);

        xgbe_start(pdata);

        DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   restart_work);

        rtnl_lock();

        xgbe_restart_dev(pdata);

        rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   tx_tstamp_work);
        struct skb_shared_hwtstamps hwtstamps;
        u64 nsec;
        unsigned long flags;

        if (pdata->tx_tstamp) {
                nsec = timecounter_cyc2time(&pdata->tstamp_tc,
                                            pdata->tx_tstamp);

                memset(&hwtstamps, 0, sizeof(hwtstamps));
                hwtstamps.hwtstamp = ns_to_ktime(nsec);
                skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
        }

        dev_kfree_skb_any(pdata->tx_tstamp_skb);

        spin_lock_irqsave(&pdata->tstamp_lock, flags);
        pdata->tx_tstamp_skb = NULL;
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
                         sizeof(pdata->tstamp_config)))
                return -EFAULT;

        return 0;
}

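/* Userspace selects the timestamping mode with the SIOCSHWTSTAMP ioctl
 * on an ifreq naming this interface.  A minimal sketch of such a caller
 * (illustrative only; "eth0" and sock_fd are placeholders, error
 * handling omitted):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */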
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        struct hwtstamp_config config;
        unsigned int mac_tscr;

        if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags)
                return -EINVAL;

        mac_tscr = 0;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                break;

        case HWTSTAMP_TX_ON:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;

        case HWTSTAMP_FILTER_NTP_ALL:
        case HWTSTAMP_FILTER_ALL:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
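                /* fall through */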
        /* PTP v1, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
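                /* fall through */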
        /* PTP v1, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
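                /* fall through */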
        /* PTP v1, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        pdata->hw_if.config_tstamp(pdata, mac_tscr);

        memcpy(&pdata->tstamp_config, &config, sizeof(config));

        return 0;
}

static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
                                struct sk_buff *skb,
                                struct xgbe_packet_data *packet)
{
        unsigned long flags;

        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
                spin_lock_irqsave(&pdata->tstamp_lock, flags);
                if (pdata->tx_tstamp_skb) {
                        /* Another timestamp in progress, ignore this one */
                        XGMAC_SET_BITS(packet->attributes,
                                       TX_PACKET_ATTRIBUTES, PTP, 0);
                } else {
                        pdata->tx_tstamp_skb = skb_get(skb);
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                }
                spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
        }

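        /* Provide a software timestamp if one was requested; done
         * unconditionally so software and hardware TX timestamping
         * can be used together.
         */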
1394         skb_tx_timestamp(skb);
1395 }
1396
1397 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
1398 {
1399         if (skb_vlan_tag_present(skb))
1400                 packet->vlan_ctag = skb_vlan_tag_get(skb);
1401 }
1402
1403 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
1404 {
1405         int ret;
1406
1407         if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1408                             TSO_ENABLE))
1409                 return 0;
1410
1411         ret = skb_cow_head(skb, 0);
1412         if (ret)
1413                 return ret;
1414
1415         packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1416         packet->tcp_header_len = tcp_hdrlen(skb);
1417         packet->tcp_payload_len = skb->len - packet->header_len;
1418         packet->mss = skb_shinfo(skb)->gso_size;
1419         DBGPR("  packet->header_len=%u\n", packet->header_len);
1420         DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
1421               packet->tcp_header_len, packet->tcp_payload_len);
1422         DBGPR("  packet->mss=%u\n", packet->mss);
1423
1424         /* Update the number of packets that will ultimately be transmitted
1425          * along with the extra bytes for each extra packet
1426          */
1427         packet->tx_packets = skb_shinfo(skb)->gso_segs;
1428         packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
1429
1430         return 0;
1431 }
1432
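/* Worked example for the accounting in xgbe_prep_tso() above (all
 * values are assumptions): an skb with skb->len = 7240, a 54-byte
 * ETH + IP + TCP header and gso_size = 1448 is segmented into
 * gso_segs = 5 packets.  Every segment after the first repeats the
 * header on the wire, so:
 *
 *      tx_packets = 5
 *      tx_bytes   = 7240 + (5 - 1) * 54 = 7456
 */
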
1433 static int xgbe_is_tso(struct sk_buff *skb)
1434 {
1435         if (skb->ip_summed != CHECKSUM_PARTIAL)
1436                 return 0;
1437
1438         if (!skb_is_gso(skb))
1439                 return 0;
1440
1441         DBGPR("  TSO packet to be processed\n");
1442
1443         return 1;
1444 }
1445
1446 static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1447                              struct xgbe_ring *ring, struct sk_buff *skb,
1448                              struct xgbe_packet_data *packet)
1449 {
1450         struct skb_frag_struct *frag;
1451         unsigned int context_desc;
1452         unsigned int len;
1453         unsigned int i;
1454
1455         packet->skb = skb;
1456
1457         context_desc = 0;
1458         packet->rdesc_count = 0;
1459
1460         packet->tx_packets = 1;
1461         packet->tx_bytes = skb->len;
1462
1463         if (xgbe_is_tso(skb)) {
1464                 /* TSO requires an extra descriptor if mss is different */
1465                 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
1466                         context_desc = 1;
1467                         packet->rdesc_count++;
1468                 }
1469
1470                 /* TSO requires an extra descriptor for TSO header */
1471                 packet->rdesc_count++;
1472
1473                 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1474                                TSO_ENABLE, 1);
1475                 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1476                                CSUM_ENABLE, 1);
1477         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
1478                 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1479                                CSUM_ENABLE, 1);
1480
1481         if (skb_vlan_tag_present(skb)) {
1482                 /* VLAN requires an extra descriptor if tag is different */
1483                 /* We can share with the TSO context descriptor */
1484                 if ((skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) &&
1485                     !context_desc) {
1486                         context_desc = 1;
1487                         packet->rdesc_count++;
1488                 }
1489
1490                 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1491                                VLAN_CTAG, 1);
1492         }
1493
1494         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1495             (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1496                 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1497                                PTP, 1);
1498
1499         for (len = skb_headlen(skb); len;) {
1500                 packet->rdesc_count++;
1501                 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1502         }
1503
1504         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1505                 frag = &skb_shinfo(skb)->frags[i];
1506                 for (len = skb_frag_size(frag); len; ) {
1507                         packet->rdesc_count++;
1508                         len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1509                 }
1510         }
1511 }
1512
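/* Illustrative sketch (not part of the driver): the two buffer loops
 * in xgbe_packet_info() above amount to a ceiling division, i.e. every
 * linear or fragment buffer consumes
 *
 *      DIV_ROUND_UP(len, XGBE_TX_MAX_BUF_SIZE)
 *
 * descriptors.  Assuming XGBE_TX_MAX_BUF_SIZE is 16320 (0x3fff rounded
 * down to a multiple of 64, as this driver's xgbe.h defines it at the
 * time of writing), a 40000-byte fragment needs
 * DIV_ROUND_UP(40000, 16320) = 3 descriptors.
 */
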
1513 static int xgbe_open(struct net_device *netdev)
1514 {
1515         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1516         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1517         int ret;
1518
1519         DBGPR("-->xgbe_open\n");
1520
1521         /* Reset the phy settings */
1522         ret = xgbe_phy_reset(pdata);
1523         if (ret)
1524                 return ret;
1525
1526         /* Enable the clocks */
1527         ret = clk_prepare_enable(pdata->sysclk);
1528         if (ret) {
1529                 netdev_alert(netdev, "dma clk_prepare_enable failed\n");
1530                 return ret;
1531         }
1532
1533         ret = clk_prepare_enable(pdata->ptpclk);
1534         if (ret) {
1535                 netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1536                 goto err_sysclk;
1537         }
1538
1539         /* Calculate the Rx buffer size before allocating rings */
1540         ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1541         if (ret < 0)
1542                 goto err_ptpclk;
1543         pdata->rx_buf_size = ret;
1544
1545         /* Allocate the channel and ring structures */
1546         ret = xgbe_alloc_channels(pdata);
1547         if (ret)
1548                 goto err_ptpclk;
1549
1550         /* Allocate the ring descriptors and buffers */
1551         ret = desc_if->alloc_ring_resources(pdata);
1552         if (ret)
1553                 goto err_channels;
1554
1555         INIT_WORK(&pdata->service_work, xgbe_service);
1556         INIT_WORK(&pdata->restart_work, xgbe_restart);
1557         INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
1558         INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1559         xgbe_init_timers(pdata);
1560
1561         ret = xgbe_start(pdata);
1562         if (ret)
1563                 goto err_rings;
1564
1565         clear_bit(XGBE_DOWN, &pdata->dev_state);
1566
1567         DBGPR("<--xgbe_open\n");
1568
1569         return 0;
1570
1571 err_rings:
1572         desc_if->free_ring_resources(pdata);
1573
1574 err_channels:
1575         xgbe_free_channels(pdata);
1576
1577 err_ptpclk:
1578         clk_disable_unprepare(pdata->ptpclk);
1579
1580 err_sysclk:
1581         clk_disable_unprepare(pdata->sysclk);
1582
1583         return ret;
1584 }
1585
1586 static int xgbe_close(struct net_device *netdev)
1587 {
1588         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1589         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1590
1591         DBGPR("-->xgbe_close\n");
1592
1593         /* Stop the device */
1594         xgbe_stop(pdata);
1595
1596         /* Free the ring descriptors and buffers */
1597         desc_if->free_ring_resources(pdata);
1598
1599         /* Free the channel and ring structures */
1600         xgbe_free_channels(pdata);
1601
1602         /* Disable the clocks */
1603         clk_disable_unprepare(pdata->ptpclk);
1604         clk_disable_unprepare(pdata->sysclk);
1605
1606         set_bit(XGBE_DOWN, &pdata->dev_state);
1607
1608         DBGPR("<--xgbe_close\n");
1609
1610         return 0;
1611 }
1612
1613 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1614 {
1615         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1616         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1617         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1618         struct xgbe_channel *channel;
1619         struct xgbe_ring *ring;
1620         struct xgbe_packet_data *packet;
1621         struct netdev_queue *txq;
1622         int ret;
1623
1624         DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1625
1626         channel = pdata->channel + skb->queue_mapping;
1627         txq = netdev_get_tx_queue(netdev, channel->queue_index);
1628         ring = channel->tx_ring;
1629         packet = &ring->packet_data;
1630
1631         ret = NETDEV_TX_OK;
1632
1633         if (skb->len == 0) {
1634                 netif_err(pdata, tx_err, netdev,
1635                           "empty skb received from stack\n");
1636                 dev_kfree_skb_any(skb);
1637                 goto tx_netdev_return;
1638         }
1639
1640         /* Calculate preliminary packet info */
1641         memset(packet, 0, sizeof(*packet));
1642         xgbe_packet_info(pdata, ring, skb, packet);
1643
1644         /* Check that there are enough descriptors available */
1645         ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1646         if (ret)
1647                 goto tx_netdev_return;
1648
1649         ret = xgbe_prep_tso(skb, packet);
1650         if (ret) {
1651                 netif_err(pdata, tx_err, netdev,
1652                           "error processing TSO packet\n");
1653                 dev_kfree_skb_any(skb);
1654                 goto tx_netdev_return;
1655         }
1656         xgbe_prep_vlan(skb, packet);
1657
1658         if (!desc_if->map_tx_skb(channel, skb)) {
1659                 dev_kfree_skb_any(skb);
1660                 goto tx_netdev_return;
1661         }
1662
1663         xgbe_prep_tx_tstamp(pdata, skb, packet);
1664
1665         /* Report on the actual number of bytes (to be) sent */
1666         netdev_tx_sent_queue(txq, packet->tx_bytes);
1667
1668         /* Configure required descriptor fields for transmission */
1669         hw_if->dev_xmit(channel);
1670
1671         if (netif_msg_pktdata(pdata))
1672                 xgbe_print_pkt(netdev, skb, true);
1673
1674         /* Stop the queue in advance if there may not be enough descriptors */
1675         xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
1676
1677         ret = NETDEV_TX_OK;
1678
1679 tx_netdev_return:
1680         return ret;
1681 }
1682
1683 static void xgbe_set_rx_mode(struct net_device *netdev)
1684 {
1685         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1686         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1687
1688         DBGPR("-->xgbe_set_rx_mode\n");
1689
1690         hw_if->config_rx_mode(pdata);
1691
1692         DBGPR("<--xgbe_set_rx_mode\n");
1693 }
1694
1695 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1696 {
1697         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1698         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1699         struct sockaddr *saddr = addr;
1700
1701         DBGPR("-->xgbe_set_mac_address\n");
1702
1703         if (!is_valid_ether_addr(saddr->sa_data))
1704                 return -EADDRNOTAVAIL;
1705
1706         memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
1707
1708         hw_if->set_mac_address(pdata, netdev->dev_addr);
1709
1710         DBGPR("<--xgbe_set_mac_address\n");
1711
1712         return 0;
1713 }
1714
1715 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1716 {
1717         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1718         int ret;
1719
1720         switch (cmd) {
1721         case SIOCGHWTSTAMP:
1722                 ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1723                 break;
1724
1725         case SIOCSHWTSTAMP:
1726                 ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1727                 break;
1728
1729         default:
1730                 ret = -EOPNOTSUPP;
1731         }
1732
1733         return ret;
1734 }
1735
1736 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1737 {
1738         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1739         int ret;
1740
1741         DBGPR("-->xgbe_change_mtu\n");
1742
1743         ret = xgbe_calc_rx_buf_size(netdev, mtu);
1744         if (ret < 0)
1745                 return ret;
1746
1747         pdata->rx_buf_size = ret;
1748         netdev->mtu = mtu;
1749
1750         xgbe_restart_dev(pdata);
1751
1752         DBGPR("<--xgbe_change_mtu\n");
1753
1754         return 0;
1755 }
1756
1757 static void xgbe_tx_timeout(struct net_device *netdev)
1758 {
1759         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1760
1761         netdev_warn(netdev, "tx timeout, device restarting\n");
1762         schedule_work(&pdata->restart_work);
1763 }
1764
1765 static void xgbe_get_stats64(struct net_device *netdev,
1766                              struct rtnl_link_stats64 *s)
1767 {
1768         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1769         struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1770
1771         DBGPR("-->%s\n", __func__);
1772
1773         pdata->hw_if.read_mmc_stats(pdata);
1774
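        /* MMC counters with a _gb suffix count good and bad frames
         * while _g counts only good ones, so subtracting the good
         * unicast/multicast/broadcast counts from the total leaves the
         * receive error count (an interpretation based on the MMC
         * counter naming).
         */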
1775         s->rx_packets = pstats->rxframecount_gb;
1776         s->rx_bytes = pstats->rxoctetcount_gb;
1777         s->rx_errors = pstats->rxframecount_gb -
1778                        pstats->rxbroadcastframes_g -
1779                        pstats->rxmulticastframes_g -
1780                        pstats->rxunicastframes_g;
1781         s->multicast = pstats->rxmulticastframes_g;
1782         s->rx_length_errors = pstats->rxlengtherror;
1783         s->rx_crc_errors = pstats->rxcrcerror;
1784         s->rx_fifo_errors = pstats->rxfifooverflow;
1785
1786         s->tx_packets = pstats->txframecount_gb;
1787         s->tx_bytes = pstats->txoctetcount_gb;
1788         s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1789         s->tx_dropped = netdev->stats.tx_dropped;
1790
1791         DBGPR("<--%s\n", __func__);
1792 }
1793
1794 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1795                                 u16 vid)
1796 {
1797         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1798         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1799
1800         DBGPR("-->%s\n", __func__);
1801
1802         set_bit(vid, pdata->active_vlans);
1803         hw_if->update_vlan_hash_table(pdata);
1804
1805         DBGPR("<--%s\n", __func__);
1806
1807         return 0;
1808 }
1809
1810 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1811                                  u16 vid)
1812 {
1813         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1814         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1815
1816         DBGPR("-->%s\n", __func__);
1817
1818         clear_bit(vid, pdata->active_vlans);
1819         hw_if->update_vlan_hash_table(pdata);
1820
1821         DBGPR("<--%s\n", __func__);
1822
1823         return 0;
1824 }
1825
1826 #ifdef CONFIG_NET_POLL_CONTROLLER
1827 static void xgbe_poll_controller(struct net_device *netdev)
1828 {
1829         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1830         struct xgbe_channel *channel;
1831         unsigned int i;
1832
1833         DBGPR("-->xgbe_poll_controller\n");
1834
1835         if (pdata->per_channel_irq) {
1836                 channel = pdata->channel;
1837                 for (i = 0; i < pdata->channel_count; i++, channel++)
1838                         xgbe_dma_isr(channel->dma_irq, channel);
1839         } else {
1840                 disable_irq(pdata->dev_irq);
1841                 xgbe_isr(pdata->dev_irq, pdata);
1842                 enable_irq(pdata->dev_irq);
1843         }
1844
1845         DBGPR("<--xgbe_poll_controller\n");
1846 }
1847 #endif /* End CONFIG_NET_POLL_CONTROLLER */
1848
1849 static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
1850                          struct tc_to_netdev *tc_to_netdev)
1851 {
1852         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1853         u8 tc;
1854
1855         if (tc_to_netdev->type != TC_SETUP_MQPRIO)
1856                 return -EINVAL;
1857
1858         tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1859         tc = tc_to_netdev->mqprio->num_tc;
1860
1861         if (tc > pdata->hw_feat.tc_cnt)
1862                 return -EINVAL;
1863
1864         pdata->num_tcs = tc;
1865         pdata->hw_if.config_tc(pdata);
1866
1867         return 0;
1868 }
1869
1870 static int xgbe_set_features(struct net_device *netdev,
1871                              netdev_features_t features)
1872 {
1873         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1874         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1875         netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
1876         int ret = 0;
1877
1878         rxhash = pdata->netdev_features & NETIF_F_RXHASH;
1879         rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1880         rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1881         rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1882
1883         if ((features & NETIF_F_RXHASH) && !rxhash)
1884                 ret = hw_if->enable_rss(pdata);
1885         else if (!(features & NETIF_F_RXHASH) && rxhash)
1886                 ret = hw_if->disable_rss(pdata);
1887         if (ret)
1888                 return ret;
1889
1890         if ((features & NETIF_F_RXCSUM) && !rxcsum)
1891                 hw_if->enable_rx_csum(pdata);
1892         else if (!(features & NETIF_F_RXCSUM) && rxcsum)
1893                 hw_if->disable_rx_csum(pdata);
1894
1895         if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1896                 hw_if->enable_rx_vlan_stripping(pdata);
1897         else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1898                 hw_if->disable_rx_vlan_stripping(pdata);
1899
1900         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1901                 hw_if->enable_rx_vlan_filtering(pdata);
1902         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1903                 hw_if->disable_rx_vlan_filtering(pdata);
1904
1905         pdata->netdev_features = features;
1906
1907         DBGPR("<--xgbe_set_features\n");
1908
1909         return 0;
1910 }
1911
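/* Illustrative sketch (not part of the driver): the offload toggles
 * handled above are normally flipped from userspace with ethtool; the
 * device name "eth0" is an assumption for the example, e.g.:
 *
 *      ethtool -K eth0 rxhash on rx on rxvlan on rx-vlan-filter on
 */
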
1912 static const struct net_device_ops xgbe_netdev_ops = {
1913         .ndo_open               = xgbe_open,
1914         .ndo_stop               = xgbe_close,
1915         .ndo_start_xmit         = xgbe_xmit,
1916         .ndo_set_rx_mode        = xgbe_set_rx_mode,
1917         .ndo_set_mac_address    = xgbe_set_mac_address,
1918         .ndo_validate_addr      = eth_validate_addr,
1919         .ndo_do_ioctl           = xgbe_ioctl,
1920         .ndo_change_mtu         = xgbe_change_mtu,
1921         .ndo_tx_timeout         = xgbe_tx_timeout,
1922         .ndo_get_stats64        = xgbe_get_stats64,
1923         .ndo_vlan_rx_add_vid    = xgbe_vlan_rx_add_vid,
1924         .ndo_vlan_rx_kill_vid   = xgbe_vlan_rx_kill_vid,
1925 #ifdef CONFIG_NET_POLL_CONTROLLER
1926         .ndo_poll_controller    = xgbe_poll_controller,
1927 #endif
1928         .ndo_setup_tc           = xgbe_setup_tc,
1929         .ndo_set_features       = xgbe_set_features,
1930 };
1931
1932 const struct net_device_ops *xgbe_get_netdev_ops(void)
1933 {
1934         return &xgbe_netdev_ops;
1935 }
1936
1937 static void xgbe_rx_refresh(struct xgbe_channel *channel)
1938 {
1939         struct xgbe_prv_data *pdata = channel->pdata;
1940         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1941         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1942         struct xgbe_ring *ring = channel->rx_ring;
1943         struct xgbe_ring_data *rdata;
1944
1945         while (ring->dirty != ring->cur) {
1946                 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1947
1948                 /* Reset rdata values */
1949                 desc_if->unmap_rdata(pdata, rdata);
1950
1951                 if (desc_if->map_rx_buffer(pdata, ring, rdata))
1952                         break;
1953
1954                 hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
1955
1956                 ring->dirty++;
1957         }
1958
1959         /* Make sure everything is written before the register write */
1960         wmb();
1961
1962         /* Update the Rx Tail Pointer Register with the address of
1963          * the last cleaned entry */
1964         rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
1965         XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1966                           lower_32_bits(rdata->rdesc_dma));
1967 }
1968
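/* Illustrative sketch (not part of the driver): ring->cur and
 * ring->dirty are free-running counters and XGBE_GET_DESC_DATA() masks
 * them down to a slot index, so the wrap-around in xgbe_rx_refresh()
 * above needs no special casing.  Assuming a power-of-two ring size,
 * the macro behaves like:
 *
 *      static inline struct xgbe_ring_data *
 *      get_desc_data(struct xgbe_ring *ring, unsigned int idx)
 *      {
 *              return &ring->rdata[idx & (ring->rdesc_count - 1)];
 *      }
 */
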
1969 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1970                                        struct napi_struct *napi,
1971                                        struct xgbe_ring_data *rdata,
1972                                        unsigned int len)
1973 {
1974         struct sk_buff *skb;
1975         u8 *packet;
1976
1977         skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
1978         if (!skb)
1979                 return NULL;
1980
1981         /* Pull in the header buffer which may contain just the header
1982          * or the header plus data
1983          */
1984         dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
1985                                       rdata->rx.hdr.dma_off,
1986                                       rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
1987
1988         packet = page_address(rdata->rx.hdr.pa.pages) +
1989                  rdata->rx.hdr.pa.pages_offset;
1990         skb_copy_to_linear_data(skb, packet, len);
1991         skb_put(skb, len);
1992
1993         return skb;
1994 }
1995
1996 static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
1997                                      struct xgbe_packet_data *packet)
1998 {
1999         /* Always zero if not the first descriptor */
2000         if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
2001                 return 0;
2002
2003         /* First descriptor with split header, return header length */
2004         if (rdata->rx.hdr_len)
2005                 return rdata->rx.hdr_len;
2006
2007         /* First descriptor but not the last descriptor and no split header,
2008          * so the full buffer was used
2009          */
2010         if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2011                 return rdata->rx.hdr.dma_len;
2012
2013         /* First and last descriptor with no split header, so
2014          * calculate how much of the buffer was used
2015          */
2016         return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
2017 }
2018
2019 static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
2020                                      struct xgbe_packet_data *packet,
2021                                      unsigned int len)
2022 {
2023         /* Always the full buffer if not the last descriptor */
2024         if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2025                 return rdata->rx.buf.dma_len;
2026
2027         /* Last descriptor, so calculate how much of the buffer was
2028          * used for the last bit of data
2029          */
2030         return rdata->rx.len - len;
2031 }
2032
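/* Worked example for the two helpers above (all values are
 * assumptions): a 3000-byte frame received without a split header into
 * a 256-byte header buffer and 2048-byte data buffers spans two
 * descriptors, with rx.len = 3000 reported on the last one:
 *
 *      desc 0 (FIRST):  buf1_len = 256, buf2_len = 2048, len = 2304
 *      desc 1 (LAST):   buf1_len = 0,   buf2_len = 3000 - 2304 = 696
 *
 * so the accumulated len reaches exactly 3000.
 */
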
2033 static int xgbe_tx_poll(struct xgbe_channel *channel)
2034 {
2035         struct xgbe_prv_data *pdata = channel->pdata;
2036         struct xgbe_hw_if *hw_if = &pdata->hw_if;
2037         struct xgbe_desc_if *desc_if = &pdata->desc_if;
2038         struct xgbe_ring *ring = channel->tx_ring;
2039         struct xgbe_ring_data *rdata;
2040         struct xgbe_ring_desc *rdesc;
2041         struct net_device *netdev = pdata->netdev;
2042         struct netdev_queue *txq;
2043         int processed = 0;
2044         unsigned int tx_packets = 0, tx_bytes = 0;
2045         unsigned int cur;
2046
2047         DBGPR("-->xgbe_tx_poll\n");
2048
2049         /* Nothing to do if there isn't a Tx ring for this channel */
2050         if (!ring)
2051                 return 0;
2052
2053         cur = ring->cur;
2054
2055         /* Be sure we get ring->cur before accessing descriptor data */
2056         smp_rmb();
2057
2058         txq = netdev_get_tx_queue(netdev, channel->queue_index);
2059
2060         while ((processed < XGBE_TX_DESC_MAX_PROC) &&
2061                (ring->dirty != cur)) {
2062                 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
2063                 rdesc = rdata->rdesc;
2064
2065                 if (!hw_if->tx_complete(rdesc))
2066                         break;
2067
2068                 /* Make sure descriptor fields are read after reading the OWN
2069                  * bit */
2070                 dma_rmb();
2071
2072                 if (netif_msg_tx_done(pdata))
2073                         xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
2074
2075                 if (hw_if->is_last_desc(rdesc)) {
2076                         tx_packets += rdata->tx.packets;
2077                         tx_bytes += rdata->tx.bytes;
2078                 }
2079
2080                 /* Free the SKB and reset the descriptor for re-use */
2081                 desc_if->unmap_rdata(pdata, rdata);
2082                 hw_if->tx_desc_reset(rdata);
2083
2084                 processed++;
2085                 ring->dirty++;
2086         }
2087
2088         if (!processed)
2089                 return 0;
2090
2091         netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
2092
2093         if ((ring->tx.queue_stopped == 1) &&
2094             (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
2095                 ring->tx.queue_stopped = 0;
2096                 netif_tx_wake_queue(txq);
2097         }
2098
2099         DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
2100
2101         return processed;
2102 }
2103
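/* Note: the netdev_tx_completed_queue() call in xgbe_tx_poll() above
 * pairs with netdev_tx_sent_queue() in xgbe_xmit(); together they feed
 * byte queue limits (BQL) for dynamic queue sizing.
 */
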
2104 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2105 {
2106         struct xgbe_prv_data *pdata = channel->pdata;
2107         struct xgbe_hw_if *hw_if = &pdata->hw_if;
2108         struct xgbe_ring *ring = channel->rx_ring;
2109         struct xgbe_ring_data *rdata;
2110         struct xgbe_packet_data *packet;
2111         struct net_device *netdev = pdata->netdev;
2112         struct napi_struct *napi;
2113         struct sk_buff *skb;
2114         struct skb_shared_hwtstamps *hwtstamps;
2115         unsigned int last, error, context_next, context;
2116         unsigned int len, buf1_len, buf2_len, max_len;
2117         unsigned int received = 0;
2118         int packet_count = 0;
2119
2120         DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
2121
2122         /* Nothing to do if there isn't a Rx ring for this channel */
2123         if (!ring)
2124                 return 0;
2125
2126         last = 0;
2127         context_next = 0;
2128
2129         napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
2130
2131         rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2132         packet = &ring->packet_data;
2133         while (packet_count < budget) {
2134                 DBGPR("  cur = %d\n", ring->cur);
2135
2136                 /* First time in the loop, see if we need to restore state */
2137                 if (!received && rdata->state_saved) {
2138                         skb = rdata->state.skb;
2139                         error = rdata->state.error;
2140                         len = rdata->state.len;
2141                 } else {
2142                         memset(packet, 0, sizeof(*packet));
2143                         skb = NULL;
2144                         error = 0;
2145                         len = 0;
2146                 }
2147
2148 read_again:
2149                 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2150
2151                 if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
2152                         xgbe_rx_refresh(channel);
2153
2154                 if (hw_if->dev_read(channel))
2155                         break;
2156
2157                 received++;
2158                 ring->cur++;
2159
2160                 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2161                                       LAST);
2162                 context_next = XGMAC_GET_BITS(packet->attributes,
2163                                               RX_PACKET_ATTRIBUTES,
2164                                               CONTEXT_NEXT);
2165                 context = XGMAC_GET_BITS(packet->attributes,
2166                                          RX_PACKET_ATTRIBUTES,
2167                                          CONTEXT);
2168
2169                 /* Earlier error, just drain the remaining data */
2170                 if ((!last || context_next) && error)
2171                         goto read_again;
2172
2173                 if (error || packet->errors) {
2174                         if (packet->errors)
2175                                 netif_err(pdata, rx_err, netdev,
2176                                           "error in received packet\n");
2177                         dev_kfree_skb(skb);
2178                         goto next_packet;
2179                 }
2180
2181                 if (!context) {
2182                         /* Get the data length in the descriptor buffers */
2183                         buf1_len = xgbe_rx_buf1_len(rdata, packet);
2184                         len += buf1_len;
2185                         buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
2186                         len += buf2_len;
2187
2188                         if (!skb) {
2189                                 skb = xgbe_create_skb(pdata, napi, rdata,
2190                                                       buf1_len);
2191                                 if (!skb) {
2192                                         error = 1;
2193                                         goto skip_data;
2194                                 }
2195                         }
2196
2197                         if (buf2_len) {
2198                                 dma_sync_single_range_for_cpu(pdata->dev,
2199                                                         rdata->rx.buf.dma_base,
2200                                                         rdata->rx.buf.dma_off,
2201                                                         rdata->rx.buf.dma_len,
2202                                                         DMA_FROM_DEVICE);
2203
2204                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2205                                                 rdata->rx.buf.pa.pages,
2206                                                 rdata->rx.buf.pa.pages_offset,
2207                                                 buf2_len,
2208                                                 rdata->rx.buf.dma_len);
2209                                 rdata->rx.buf.pa.pages = NULL;
2210                         }
2211                 }
2212
2213 skip_data:
2214                 if (!last || context_next)
2215                         goto read_again;
2216
2217                 if (!skb)
2218                         goto next_packet;
2219
2220                 /* Be sure we don't exceed the configured MTU */
2221                 max_len = netdev->mtu + ETH_HLEN;
2222                 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2223                     (skb->protocol == htons(ETH_P_8021Q)))
2224                         max_len += VLAN_HLEN;
2225
2226                 if (skb->len > max_len) {
2227                         netif_err(pdata, rx_err, netdev,
2228                                   "packet length exceeds configured MTU\n");
2229                         dev_kfree_skb(skb);
2230                         goto next_packet;
2231                 }
2232
2233                 if (netif_msg_pktdata(pdata))
2234                         xgbe_print_pkt(netdev, skb, false);
2235
2236                 skb_checksum_none_assert(skb);
2237                 if (XGMAC_GET_BITS(packet->attributes,
2238                                    RX_PACKET_ATTRIBUTES, CSUM_DONE))
2239                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2240
2241                 if (XGMAC_GET_BITS(packet->attributes,
2242                                    RX_PACKET_ATTRIBUTES, VLAN_CTAG))
2243                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2244                                                packet->vlan_ctag);
2245
2246                 if (XGMAC_GET_BITS(packet->attributes,
2247                                    RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
2248                         u64 nsec;
2249
2250                         nsec = timecounter_cyc2time(&pdata->tstamp_tc,
2251                                                     packet->rx_tstamp);
2252                         hwtstamps = skb_hwtstamps(skb);
2253                         hwtstamps->hwtstamp = ns_to_ktime(nsec);
2254                 }
2255
2256                 if (XGMAC_GET_BITS(packet->attributes,
2257                                    RX_PACKET_ATTRIBUTES, RSS_HASH))
2258                         skb_set_hash(skb, packet->rss_hash,
2259                                      packet->rss_hash_type);
2260
2261                 skb->dev = netdev;
2262                 skb->protocol = eth_type_trans(skb, netdev);
2263                 skb_record_rx_queue(skb, channel->queue_index);
2264
2265                 napi_gro_receive(napi, skb);
2266
2267 next_packet:
2268                 packet_count++;
2269         }
2270
2271         /* Check if we need to save state before leaving */
2272         if (received && (!last || context_next)) {
2273                 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2274                 rdata->state_saved = 1;
2275                 rdata->state.skb = skb;
2276                 rdata->state.len = len;
2277                 rdata->state.error = error;
2278         }
2279
2280         DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
2281
2282         return packet_count;
2283 }
2284
2285 static int xgbe_one_poll(struct napi_struct *napi, int budget)
2286 {
2287         struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
2288                                                     napi);
2289         struct xgbe_prv_data *pdata = channel->pdata;
2290         int processed = 0;
2291
2292         DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
2293
2294         /* Cleanup Tx ring first */
2295         xgbe_tx_poll(channel);
2296
2297         /* Process Rx ring next */
2298         processed = xgbe_rx_poll(channel, budget);
2299
2300         /* If we processed everything, we are done */
2301         if ((processed < budget) && napi_complete_done(napi, processed)) {
2302                 /* Enable Tx and Rx interrupts */
2303                 if (pdata->channel_irq_mode)
2304                         xgbe_enable_rx_tx_int(pdata, channel);
2305                 else
2306                         enable_irq(channel->dma_irq);
2307         }
2308
2309         DBGPR("<--xgbe_one_poll: received = %d\n", processed);
2310
2311         return processed;
2312 }
2313
2314 static int xgbe_all_poll(struct napi_struct *napi, int budget)
2315 {
2316         struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
2317                                                    napi);
2318         struct xgbe_channel *channel;
2319         int ring_budget;
2320         int processed, last_processed;
2321         unsigned int i;
2322
2323         DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
2324
2325         processed = 0;
2326         ring_budget = budget / pdata->rx_ring_count;
2327         do {
2328                 last_processed = processed;
2329
2330                 channel = pdata->channel;
2331                 for (i = 0; i < pdata->channel_count; i++, channel++) {
2332                         /* Cleanup Tx ring first */
2333                         xgbe_tx_poll(channel);
2334
2335                         /* Process Rx ring next */
2336                         if (ring_budget > (budget - processed))
2337                                 ring_budget = budget - processed;
2338                         processed += xgbe_rx_poll(channel, ring_budget);
2339                 }
2340         } while ((processed < budget) && (processed != last_processed));
2341
2342         /* If we processed everything, we are done */
2343         if ((processed < budget) && napi_complete_done(napi, processed)) {
2344                 /* Enable Tx and Rx interrupts */
2345                 xgbe_enable_rx_tx_ints(pdata);
2346         }
2347
2348         DBGPR("<--xgbe_all_poll: received = %d\n", processed);
2349
2350         return processed;
2351 }
2352
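/* Worked example for the budget split in xgbe_all_poll() above (values
 * are assumptions): with budget = 64 and rx_ring_count = 4, each
 * channel is polled with ring_budget = 16 per pass; the do/while loop
 * repeats until the whole budget is consumed or a full pass makes no
 * progress (processed == last_processed).
 */
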
2353 void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2354                        unsigned int idx, unsigned int count, unsigned int flag)
2355 {
2356         struct xgbe_ring_data *rdata;
2357         struct xgbe_ring_desc *rdesc;
2358
2359         while (count--) {
2360                 rdata = XGBE_GET_DESC_DATA(ring, idx);
2361                 rdesc = rdata->rdesc;
2362                 netdev_dbg(pdata->netdev,
2363                            "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
2364                            (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
2365                            le32_to_cpu(rdesc->desc0),
2366                            le32_to_cpu(rdesc->desc1),
2367                            le32_to_cpu(rdesc->desc2),
2368                            le32_to_cpu(rdesc->desc3));
2369                 idx++;
2370         }
2371 }
2372
2373 void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2374                        unsigned int idx)
2375 {
2376         struct xgbe_ring_data *rdata;
2377         struct xgbe_ring_desc *rdesc;
2378
2379         rdata = XGBE_GET_DESC_DATA(ring, idx);
2380         rdesc = rdata->rdesc;
2381         netdev_dbg(pdata->netdev,
2382                    "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
2383                    idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
2384                    le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
2385 }
2386
2387 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
2388 {
2389         struct ethhdr *eth = (struct ethhdr *)skb->data;
2390         unsigned char *buf = skb->data;
2391         unsigned char buffer[128];
2392         unsigned int i, j;
2393
2394         netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2395
2396         netdev_dbg(netdev, "%s packet of %d bytes\n",
2397                    (tx_rx ? "TX" : "RX"), skb->len);
2398
2399         netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
2400         netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
2401         netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
2402
2403         for (i = 0, j = 0; i < skb->len;) {
2404                 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
2405                               buf[i++]);
2406
2407                 if ((i % 32) == 0) {
2408                         netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
2409                         j = 0;
2410                 } else if ((i % 16) == 0) {
2411                         buffer[j++] = ' ';
2412                         buffer[j++] = ' ';
2413                 } else if ((i % 4) == 0) {
2414                         buffer[j++] = ' ';
2415                 }
2416         }
2417         if (i % 32)
2418                 netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
2419
2420         netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2421 }