/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
                                      unsigned int usec)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_usec_to_riwt\n");

        rate = pdata->sysclk_rate;

        /* Convert the input usec value to the watchdog timer value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
         */
        ret = (usec * (rate / 1000000)) / 256;

        DBGPR("<--xgbe_usec_to_riwt\n");

        return ret;
}
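/* Worked example (assuming, say, a 125 MHz system clock): usec = 30 gives
 * (30 * (125000000 / 1000000)) / 256 = 3750 / 256 = 14 in integer math.
 * Converting 14 back via xgbe_riwt_to_usec() below yields (14 * 256) / 125
 * = 28, so a round trip can lose a few usec to integer truncation.
 */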
static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
                                      unsigned int riwt)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_riwt_to_usec\n");

        rate = pdata->sysclk_rate;

        /* Convert the input watchdog timer value to the usec value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
         */
        ret = (riwt * 256) / (rate / 1000000);

        DBGPR("<--xgbe_riwt_to_usec\n");

        return ret;
}
static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++)
                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
                                       pdata->pblx8);

        return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
        return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
                                       pdata->tx_pbl);
        }

        return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
        return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
                                       pdata->rx_pbl);
        }

        return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
                                       pdata->tx_osp_mode);
        }

        return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
                                       pdata->rx_riwt);
        }

        return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
        return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
                                       pdata->rx_buf_size);
        }
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
        }
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
        }

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
                              unsigned int index, unsigned int val)
{
        unsigned int wait;
        int ret = 0;

        mutex_lock(&pdata->rss_mutex);

        if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
                ret = -EBUSY;
                goto unlock;
        }

        XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

        wait = 1000;
        while (wait--) {
                if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                        goto unlock;

                usleep_range(1000, 1500);
        }

        ret = -EBUSY;

unlock:
        mutex_unlock(&pdata->rss_mutex);

        return ret;
}
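/* Note on the handshake above: MAC_RSSAR.OB is the "operation busy" bit.
 * Writing OB=1 latches the value staged in MAC_RSSDR into the key/table
 * entry selected by RSSIA/ADDRT, and the hardware clears OB when the
 * indirect write completes, which is why the routine polls OB with a
 * bounded number of sleeps before giving up with -EBUSY.
 */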
static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
        unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
        unsigned int *key = (unsigned int *)&pdata->rss_key;
        int ret;

        while (key_regs--) {
                ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
                                         key_regs, *key++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
                ret = xgbe_write_rss_reg(pdata,
                                         XGBE_RSS_LOOKUP_TABLE_TYPE, i,
                                         pdata->rss_table[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
        memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

        return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
                                     const u32 *table)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
                XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

        return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        /* Program the hash key */
        ret = xgbe_write_rss_hash_key(pdata);
        if (ret)
                return ret;

        /* Program the lookup table */
        ret = xgbe_write_rss_lookup_table(pdata);
        if (ret)
                return ret;

        /* Set the RSS options */
        XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

        /* Enable RSS */
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

        return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

        return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return;

        if (pdata->netdev->features & NETIF_F_RXHASH)
                ret = xgbe_enable_rss(pdata);
        else
                ret = xgbe_disable_rss(pdata);

        if (ret)
                netdev_err(pdata->netdev,
                           "error configuring RSS, RSS disabled\n");
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);

        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);

                /* Enable transmit flow control */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

                /* Set pause time */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

        return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

        return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->tx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_tx_flow_control(pdata);
        else
                xgbe_disable_tx_flow_control(pdata);

        return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->rx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_rx_flow_control(pdata);
        else
                xgbe_disable_rx_flow_control(pdata);

        return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        xgbe_config_tx_flow_control(pdata);
        xgbe_config_rx_flow_control(pdata);

        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
                           (pfc && pfc->pfc_en) ? 1 : 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int dma_ch_isr, dma_ch_ier;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                /* Clear all the interrupts which are set */
                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

                /* Clear all interrupt enable bits */
                dma_ch_ier = 0;

                /* Enable following interrupts
                 *   NIE  - Normal Interrupt Summary Enable
                 *   AIE  - Abnormal Interrupt Summary Enable
                 *   FBEE - Fatal Bus Error Enable
                 */
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

                if (channel->tx_ring) {
                        /* Enable the following Tx interrupts
                         *   TIE - Transmit Interrupt Enable (unless using
                         *         per channel interrupts)
                         */
                        if (!pdata->per_channel_irq)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                }
                if (channel->rx_ring) {
                        /* Enable following Rx interrupts
                         *   RBUE - Receive Buffer Unavailable Enable
                         *   RIE  - Receive Interrupt Enable (unless using
                         *          per channel interrupts)
                         */
                        XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                        if (!pdata->per_channel_irq)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                }

                XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
        }
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mtl_q_isr;
        unsigned int q_count, i;

        q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
        for (i = 0; i < q_count; i++) {
                /* Clear all the interrupts which are set */
                mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

                /* No MTL interrupts to be enabled */
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mac_ier = 0;

        /* Enable Timestamp interrupt */
        XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

        /* Enable all counter interrupts */
        XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
        XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
                return 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

        return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
                return 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

        return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
                return 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

        return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
                                     unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

        return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
                                       unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

        return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                             struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;

        mac_addr_lo = 0;
        mac_addr_hi = 0;

        if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
                mac_addr[2] = ha->addr[2];
                mac_addr[3] = ha->addr[3];
                mac_addr = (u8 *)&mac_addr_hi;
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];

                netif_dbg(pdata, drv, pdata->netdev,
                          "adding mac address %pM at %#x\n",
                          ha->addr, *mac_reg);

                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }

        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
        *mac_reg += MAC_MACA_INC;
        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
        *mac_reg += MAC_MACA_INC;
}
static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mac_reg;
        unsigned int addn_macs;

        mac_reg = MAC_MACA1HR;
        addn_macs = pdata->hw_feat.addn_mac;

        if (netdev_uc_count(netdev) > addn_macs) {
                xgbe_set_promiscuous_mode(pdata, 1);
        } else {
                netdev_for_each_uc_addr(ha, netdev) {
                        xgbe_set_mac_reg(pdata, ha, &mac_reg);
                        addn_macs--;
                }

                if (netdev_mc_count(netdev) > addn_macs) {
                        xgbe_set_all_multicast_mode(pdata, 1);
                } else {
                        netdev_for_each_mc_addr(ha, netdev) {
                                xgbe_set_mac_reg(pdata, ha, &mac_reg);
                                addn_macs--;
                        }
                }
        }

        /* Clear remaining additional MAC address entries */
        while (addn_macs--)
                xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int hash_reg;
        unsigned int hash_table_shift, hash_table_count;
        u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
        u32 crc;
        unsigned int i;

        hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
        hash_table_count = pdata->hw_feat.hash_table_size / 32;
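        /* Worked example: with a 256-entry hash table, hash_table_shift is
         * 26 - (256 >> 7) = 24, so "crc >> hash_table_shift" below keeps the
         * top 8 CRC bits (an index of 0-255), and hash_table_count is
         * 256 / 32 = 8 registers of 32 bits each.
         */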
        memset(hash_table, 0, sizeof(hash_table));

        /* Build the MAC Hash Table register values */
        netdev_for_each_uc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        netdev_for_each_mc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        /* Set the MAC Hash Table registers */
        hash_reg = MAC_HTR0;
        for (i = 0; i < hash_table_count; i++) {
                XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
                hash_reg += MAC_HTR_INC;
        }
}

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
        if (pdata->hw_feat.hash_table_size)
                xgbe_set_mac_hash_table(pdata);
        else
                xgbe_set_mac_addn_addrs(pdata);

        return 0;
}
static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
        unsigned int mac_addr_hi, mac_addr_lo;

        mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
        mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
                      (addr[1] <<  8) | (addr[0] <<  0);

        XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
        XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

        return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        unsigned int pr_mode, am_mode;

        pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
        am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

        xgbe_set_promiscuous_mode(pdata, pr_mode);
        xgbe_set_all_multicast_mode(pdata, am_mode);

        xgbe_add_mac_addresses(pdata);

        return 0;
}
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                              int mmd_reg)
{
        unsigned int mmd_address;
        int mmd_data;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and reading 32 bits of data.
         */
        mutex_lock(&pdata->xpcs_mutex);
        XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
        mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
        mutex_unlock(&pdata->xpcs_mutex);

        return mmd_data;
}
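/* Worked example of the indirect access (illustrative values): reading
 * register 0x0001 of MMD 3 gives mmd_address = (3 << 16) | 0x0001 =
 * 0x30001. The address phase writes mmd_address >> 8 = 0x300 to the MMD
 * select register, and the data phase reads the 32-bit mmio offset
 * (0x01 << 2) = 0x04.
 */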
static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                                int mmd_reg, int mmd_data)
{
        unsigned int mmd_address;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and writing 32 bits of data.
         */
        mutex_lock(&pdata->xpcs_mutex);
        XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
        XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
        mutex_unlock(&pdata->xpcs_mutex);
}
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
        return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

        return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

        return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        /* Put the VLAN tag in the Rx descriptor */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

        /* Don't check the VLAN type */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

        /* Check only C-TAG (0x8100) packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

        /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

        return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

        return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Enable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero.  Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering.  This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

        return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Disable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

        return 0;
}
#ifndef CRCPOLY_LE
#define CRCPOLY_LE 0xedb88320
#endif
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
        u32 poly = CRCPOLY_LE;
        u32 crc = ~0;
        u32 temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_bitmask_order(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= poly;
        }

        return crc;
}
static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
        u32 crc;
        u16 vid;
        __le16 vid_le;
        u16 vlan_hash_table = 0;

        /* Generate the VLAN Hash Table value */
        for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
                /* Get the CRC32 value of the VLAN ID */
                vid_le = cpu_to_le16(vid);
                crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

                vlan_hash_table |= (1 << crc);
        }

        /* Set the VLAN Hash Table filtering register */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

        return 0;
}
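/* Note on the hash above: "crc >> 28" keeps only the upper 4 bits of the
 * bit-reversed CRC, so every active VLAN ID selects one of the 16 bits of
 * MAC_VLANHTR.VLHT. Any received tag whose hash lands on a set bit passes
 * the filter, meaning hash collisions can admit unconfigured VLANs; the
 * hash only pre-filters and software still validates the tag.
 */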
static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;

        /* Reset the Tx descriptor
         *   Set buffer 1 (lo) address to zero
         *   Set buffer 1 (hi) address to zero
         *   Reset all other control bits (IC, TTSE, B2L & B1L)
         *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
         */
        rdesc->desc0 = 0;
        rdesc->desc1 = 0;
        rdesc->desc2 = 0;
        rdesc->desc3 = 0;

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        int i;
        int start_index = ring->cur;

        DBGPR("-->tx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Tx descriptor */
                xgbe_tx_desc_reset(rdata);
        }

        /* Update the total number of Tx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--tx_desc_init\n");
}
static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
                               struct xgbe_ring_data *rdata, unsigned int index)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
        unsigned int rx_usecs = pdata->rx_usecs;
        unsigned int rx_frames = pdata->rx_frames;
        unsigned int inte;
        dma_addr_t hdr_dma, buf_dma;

        if (!rx_usecs && !rx_frames) {
                /* No coalescing, interrupt for every descriptor */
                inte = 1;
        } else {
                /* Set interrupt based on Rx frame coalescing setting */
                if (rx_frames && !((index + 1) % rx_frames))
                        inte = 1;
                else
                        inte = 0;
        }

        /* Reset the Rx descriptor
         *   Set buffer 1 (lo) address to header dma address (lo)
         *   Set buffer 1 (hi) address to header dma address (hi)
         *   Set buffer 2 (lo) address to buffer dma address (lo)
         *   Set buffer 2 (hi) address to buffer dma address (hi) and
         *     set control bits OWN and INTE
         */
        hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
        buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
        rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
        rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
        rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

        /* Since the Rx DMA engine is likely running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the descriptor
         */
        dma_wmb();

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;
        unsigned int i;

        DBGPR("-->rx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Rx descriptor */
                xgbe_rx_desc_reset(pdata, rdata, i);
        }

        /* Update the total number of Rx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Update the Rx Descriptor Tail Pointer */
        rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--rx_desc_init\n");
}
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
                                      unsigned int addend)
{
        /* Set the addend register value and tell the device */
        XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

        /* Wait for addend update to complete */
        while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
                udelay(5);
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
                                 unsigned int nsec)
{
        /* Set the time values and tell the device */
        XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
        XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

        /* Wait for time update to complete */
        while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
                udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
        u64 nsec;

        nsec = XGMAC_IOREAD(pdata, MAC_STSR);
        nsec *= NSEC_PER_SEC;
        nsec += XGMAC_IOREAD(pdata, MAC_STNR);

        return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
        unsigned int tx_snr;
        u64 nsec;

        tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
        if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
                return 0;

        nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
        nsec *= NSEC_PER_SEC;
        nsec += tx_snr;

        return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
                               struct xgbe_ring_desc *rdesc)
{
        u64 nsec;

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
            !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
                nsec = le32_to_cpu(rdesc->desc1);
                nsec <<= 32;
                nsec |= le32_to_cpu(rdesc->desc0);
                if (nsec != 0xffffffffffffffffULL) {
                        packet->rx_tstamp = nsec;
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       RX_TSTAMP, 1);
                }
        }
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
                              unsigned int mac_tscr)
{
        /* Set one nano-second accuracy */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

        /* Set fine timestamp update */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

        /* Overwrite earlier timestamps */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

        XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

        /* Exit if timestamping is not enabled */
        if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
                return 0;

        /* Initialize time registers */
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
        xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
        xgbe_set_tstamp_time(pdata, 0, 0);

        /* Initialize the timecounter */
        timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
                         ktime_to_ns(ktime_get_real()));

        return 0;
}
static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
        struct ieee_ets *ets = pdata->ets;
        unsigned int total_weight, min_weight, weight;
        unsigned int i;

        if (!ets)
                return;

        /* Set Tx to deficit weighted round robin scheduling algorithm (when
         * traffic class is using ETS algorithm)
         */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

        /* Set Traffic Class algorithms */
        total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
        min_weight = total_weight / 100;
        if (!min_weight)
                min_weight = 1;
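        /* Worked example (illustrative): with an MTU of 1500 and 4 traffic
         * classes, total_weight is 6000 and min_weight is 60, so a TC
         * granted 25% ETS bandwidth below gets weight 6000 * 25 / 100 =
         * 1500, clamped to the [60, 6000] range.
         */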
        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TC%u using SP\n", i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_SP);
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        weight = total_weight * ets->tc_tx_bw[i] / 100;
                        weight = clamp(weight, min_weight, total_weight);

                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TC%u using DWRR (weight %u)\n", i, weight);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_ETS);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
                                               weight);
                        break;
                }
        }
}

static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;
        struct ieee_ets *ets = pdata->ets;
        unsigned int mask, reg, reg_val;
        unsigned int tc, prio;

        if (!pfc || !ets)
                return;

        for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
                mask = 0;
                for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                        if ((pfc->pfc_en & (1 << prio)) &&
                            (ets->prio_tc[prio] == tc))
                                mask |= (1 << prio);
                }
                mask &= 0xff;

                netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
                          tc, mask);
                reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
                reg_val = XGMAC_IOREAD(pdata, reg);

                reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
                reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));

                XGMAC_IOWRITE(pdata, reg, reg_val);
        }

        xgbe_config_flow_control(pdata);
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
                               struct xgbe_ring *ring)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring_data *rdata;

        /* Make sure everything is written before the register write */
        wmb();

        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Start the Tx timer */
        if (pdata->tx_usecs && !channel->tx_timer_active) {
                channel->tx_timer_active = 1;
                mod_timer(&channel->tx_timer,
                          jiffies + usecs_to_jiffies(pdata->tx_usecs));
        }

        ring->tx.xmit_more = 0;
}
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        unsigned int csum, tso, vlan;
        unsigned int tso_context, vlan_context;
        unsigned int tx_set_ic;
        int start_index = ring->cur;
        int cur_index = ring->cur;
        int i;

        DBGPR("-->xgbe_dev_xmit\n");

        csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              CSUM_ENABLE);
        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        if (tso && (packet->mss != ring->tx.cur_mss))
                tso_context = 1;
        else
                tso_context = 0;

        if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
                vlan_context = 1;
        else
                vlan_context = 0;

        /* Determine if an interrupt should be generated for this Tx:
         *   Interrupt:
         *     - Tx frame count exceeds the frame count setting
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set exceeds the frame count setting
         *   No interrupt:
         *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set does not exceed the frame count setting
         */
        ring->coalesce_count += packet->tx_packets;
        if (!pdata->tx_frames)
                tx_set_ic = 0;
        else if (packet->tx_packets > pdata->tx_frames)
                tx_set_ic = 1;
        else if ((ring->coalesce_count % pdata->tx_frames) <
                 packet->tx_packets)
                tx_set_ic = 1;
        else
                tx_set_ic = 0;
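        /* Worked example (illustrative): with tx-frames = 25 and
         * coalesce_count moving from 24 to 27 for a 3-segment packet,
         * 27 % 25 = 2 is less than 3, so the counter just wrapped past a
         * 25-frame boundary and tx_set_ic is set for this packet.
         */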
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        rdesc = rdata->rdesc;

        /* Create a context descriptor if this is a TSO packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "TSO context descriptor, mss=%u\n",
                                  packet->mss);

                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
                                          MSS, packet->mss);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Indicate this descriptor contains the MSS */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          TCMSSV, 1);

                        ring->tx.cur_mss = packet->mss;
                }

                if (vlan_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "VLAN context descriptor, ctag=%u\n",
                                  packet->vlan_ctag);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Set the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VT, packet->vlan_ctag);

                        /* Indicate this descriptor contains the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VLTV, 1);

                        ring->tx.cur_vlan_ctag = packet->vlan_ctag;
                }

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;
        }

        /* Update buffer address (for TSO this is the header) */
        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

        /* Update the buffer length */
        XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                          rdata->skb_dma_len);

        /* VLAN tag insertion check */
        if (vlan)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
                                  TX_NORMAL_DESC2_VLAN_INSERT);

        /* Timestamp enablement check */
        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

        /* Mark it as First Descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

        /* Mark it as a NORMAL descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

        /* Set OWN bit if not the first descriptor */
        if (cur_index != start_index)
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (tso) {
                /* Enable TSO */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
                                  packet->tcp_payload_len);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
                                  packet->tcp_header_len / 4);

                pdata->ext_stats.tx_tso_packets++;
        } else {
                /* Enable CRC and Pad Insertion */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

                /* Enable HW CSUM */
                if (csum)
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
                                          CIC, 0x3);

                /* Set the total length to be transmitted */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
                                  packet->length);
        }

        for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;

                /* Update buffer address */
                rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
                rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

                /* Update the buffer length */
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                                  rdata->skb_dma_len);

                /* Set OWN bit */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

                /* Mark it as NORMAL descriptor */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

                /* Enable HW CSUM */
                if (csum)
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
                                          CIC, 0x3);
        }

        /* Set LAST bit for the last descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

        /* Set IC bit based on Tx coalescing settings */
        if (tx_set_ic)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

        /* Save the Tx info to report back during cleanup */
        rdata->tx.packets = packet->tx_packets;
        rdata->tx.bytes = packet->tx_bytes;

        /* In case the Tx DMA engine is running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the first descriptor
         */
        dma_wmb();

        /* Set OWN bit for the first descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (netif_msg_tx_queued(pdata))
                xgbe_dump_tx_desc(pdata, ring, start_index,
                                  packet->rdesc_count, 1);

        /* Make sure ownership is written to the descriptor */
        smp_wmb();

        ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                                                   channel->queue_index)))
                xgbe_tx_start_xmit(channel, ring);
        else
                ring->tx.xmit_more = 1;

        DBGPR("  %s: descriptors %u to %u written\n",
              channel->name, start_index & (ring->rdesc_count - 1),
              (ring->cur - 1) & (ring->rdesc_count - 1));

        DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        struct net_device *netdev = pdata->netdev;
        unsigned int err, etlt, l34t;

        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;

        /* Check for data availability */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
                return 1;

        /* Make sure descriptor fields are read after reading the OWN bit */
        dma_rmb();

        if (netif_msg_rx_status(pdata))
                xgbe_dump_rx_desc(pdata, ring, ring->cur);

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
                /* Timestamp Context Descriptor */
                xgbe_get_rx_tstamp(packet, rdesc);

                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT, 1);
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT_NEXT, 0);
                return 0;
        }

        /* Normal Descriptor, be sure Context Descriptor bit is off */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

        /* Indicate if a Context Descriptor is next */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT_NEXT, 1);

        /* Get the header length */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);
                if (rdata->rx.hdr_len)
                        pdata->ext_stats.rx_split_header_packets++;
        }

        /* Get the RSS hash */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               RSS_HASH, 1);

                packet->rss_hash = le32_to_cpu(rdesc->desc1);

                l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
                switch (l34t) {
                case RX_DESC3_L34T_IPV4_TCP:
                case RX_DESC3_L34T_IPV4_UDP:
                case RX_DESC3_L34T_IPV6_TCP:
                case RX_DESC3_L34T_IPV6_UDP:
                        packet->rss_hash_type = PKT_HASH_TYPE_L4;
                        break;
                default:
                        packet->rss_hash_type = PKT_HASH_TYPE_L3;
                }
        }

        /* Get the packet length */
        rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

        if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
                /* Not all the data has been transferred for this packet */
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               INCOMPLETE, 1);
                return 0;
        }

        /* This is the last of the data for this packet */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                       INCOMPLETE, 0);

        /* Set checksum done indicator as appropriate */
        if (netdev->features & NETIF_F_RXCSUM)
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CSUM_DONE, 1);

        /* Check for errors (only valid in last descriptor) */
        err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
        etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
        netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

        if (!err || !etlt) {
                /* No error if err is 0 or etlt is 0 */
                if ((etlt == 0x09) &&
                    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       VLAN_CTAG, 1);
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
                                                              RX_NORMAL_DESC0,
                                                              OVT);
                        netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
                                  packet->vlan_ctag);
                }
        } else {
                if ((etlt == 0x05) || (etlt == 0x06))
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       CSUM_DONE, 0);
                else
                        XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
                                       FRAME, 1);
        }

        DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
              ring->cur & (ring->rdesc_count - 1), ring->cur);

        return 0;
}
static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
        /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
        /* Rx and Tx share LD bit, so check TDES3.LD bit */
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}
static int xgbe_enable_int(struct xgbe_channel *channel,
                           enum xgbe_int int_id)
{
        unsigned int dma_ch_ier;

        dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

        switch (int_id) {
        case XGMAC_INT_DMA_CH_SR_TI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TI_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
                break;
        case XGMAC_INT_DMA_ALL:
                dma_ch_ier |= channel->saved_ier;
                break;
        default:
                return -1;
        }

        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

        return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
                            enum xgbe_int int_id)
{
        unsigned int dma_ch_ier;

        dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

        switch (int_id) {
        case XGMAC_INT_DMA_CH_SR_TI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TI_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
                break;
        case XGMAC_INT_DMA_ALL:
                channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
                dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
                break;
        default:
                return -1;
        }

        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

        return 0;
}
static int xgbe_exit(struct xgbe_prv_data *pdata)
{
        unsigned int count = 2000;

        DBGPR("-->xgbe_exit\n");

        /* Issue a software reset */
        XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
        usleep_range(10, 15);

        /* Poll Until Poll Condition */
        while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
                usleep_range(500, 600);

        if (!count)
                return -EBUSY;

        DBGPR("<--xgbe_exit\n");

        return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
        unsigned int i, count;

        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
                return 0;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

        /* Poll Until Poll Condition */
        for (i = 0; i < pdata->tx_q_count; i++) {
                count = 2000;
                while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
                                                        MTL_Q_TQOMR, FTQ))
                        usleep_range(500, 600);

                if (!count)
                        return -EBUSY;
        }

        return 0;
}
static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
        /* Set enhanced addressing mode */
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

        /* Set the System Bus mode */
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
        unsigned int arcache, awcache;

        arcache = 0;
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

        awcache = 0;
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Set Tx to weighted round robin scheduling algorithm */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

        /* Set Tx traffic classes to use WRR algorithm with equal weights */
        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                       MTL_TSA_ETS);
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
        }

        /* Set Rx to strict priority algorithm */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
                                                  unsigned int queue_count)
{
        unsigned int q_fifo_size = 0;
        enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;

        /* Calculate Tx/Rx fifo share per queue */
        switch (fifo_size) {
        case 0:
                q_fifo_size = XGBE_FIFO_SIZE_B(128);
                break;
        case 1:
                q_fifo_size = XGBE_FIFO_SIZE_B(256);
                break;
        case 2:
                q_fifo_size = XGBE_FIFO_SIZE_B(512);
                break;
        case 3:
                q_fifo_size = XGBE_FIFO_SIZE_KB(1);
                break;
        case 4:
                q_fifo_size = XGBE_FIFO_SIZE_KB(2);
                break;
        case 5:
                q_fifo_size = XGBE_FIFO_SIZE_KB(4);
                break;
        case 6:
                q_fifo_size = XGBE_FIFO_SIZE_KB(8);
                break;
        case 7:
                q_fifo_size = XGBE_FIFO_SIZE_KB(16);
                break;
        case 8:
                q_fifo_size = XGBE_FIFO_SIZE_KB(32);
                break;
        case 9:
                q_fifo_size = XGBE_FIFO_SIZE_KB(64);
                break;
        case 10:
                q_fifo_size = XGBE_FIFO_SIZE_KB(128);
                break;
        case 11:
                q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }

        /* The configured value is not the actual amount of fifo RAM */
        q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

        q_fifo_size = q_fifo_size / queue_count;

        /* Set the queue fifo size programmable value */
        if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
                p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
                p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
                p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
                p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
                p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
                p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
                p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
                p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
                p_fifo = XGMAC_MTL_FIFO_SIZE_512;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256;

        return p_fifo;
}
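/* Worked example (illustrative): a 64 KB fifo shared by 4 queues yields
 * q_fifo_size = 16 KB per queue, which the ladder above maps to
 * XGMAC_MTL_FIFO_SIZE_16K. The programmable TQS/RQS values encode the size
 * in 256-byte units minus one, which is why the callers below report
 * ((fifo_size + 1) * 256) bytes per queue.
 */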
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
        enum xgbe_mtl_fifo_size fifo_size;
        unsigned int i;

        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
                                                  pdata->tx_q_count);

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

        netif_info(pdata, drv, pdata->netdev,
                   "%d Tx hardware queues, %d byte fifo per queue\n",
                   pdata->tx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
        enum xgbe_mtl_fifo_size fifo_size;
        unsigned int i;

        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
                                                  pdata->rx_q_count);

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

        netif_info(pdata, drv, pdata->netdev,
                   "%d Rx hardware queues, %d byte fifo per queue\n",
                   pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
        unsigned int qptc, qptc_extra, queue;
        unsigned int prio_queues;
        unsigned int ppq, ppq_extra, prio;
        unsigned int mask;
        unsigned int i, j, reg, reg_val;

        /* Map the MTL Tx Queues to Traffic Classes
         *   Note: Tx Queues >= Traffic Classes
         */
        qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
        qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
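        /* Worked example (illustrative): 6 Tx queues over 4 traffic classes
         * gives qptc = 1 and qptc_extra = 2, so TC0 and TC1 each take two
         * queues in the loop below while TC2 and TC3 take one.
         */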
        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
                for (j = 0; j < qptc; j++) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }

                if (i < qptc_extra) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }
        }

        /* Map the 8 VLAN priority values to available MTL Rx queues */
        prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
                            pdata->rx_q_count);
        ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
        ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

        reg = MAC_RQC2R;
        reg_val = 0;
        for (i = 0, prio = 0; i < prio_queues;) {
                mask = 0;
                for (j = 0; j < ppq; j++) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }

                if (i < ppq_extra) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }

                reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

                if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
                        continue;

                XGMAC_IOWRITE(pdata, reg, reg_val);
                reg += MAC_RQC2_INC;
                reg_val = 0;
        }

        /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
        reg = MTL_RQDCM0R;
        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count;) {
                reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

                if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
                        continue;

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MTL_RQDCM_INC;
                reg_val = 0;
        }
}
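/* A note on the MTL_RQDCM packing above: each register maps
 * MTL_RQDCM_Q_PER_REG (four) queues, one byte per queue, and the 0x80
 * written into each byte appears to be the per-queue dynamic-mapping bit,
 * letting any DMA channel service the queue rather than a fixed static
 * assignment.
 */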
2132 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2136 for (i = 0; i < pdata->rx_q_count; i++) {
2137 /* Activate flow control when less than 4k left in fifo */
2138 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
2140 /* De-activate flow control when more than 6k left in fifo */
2141 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
2145 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2147 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2149 /* Filtering is done using perfect filtering and hash filtering */
2150 if (pdata->hw_feat.hash_table_size) {
2151 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2152 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2153 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2157 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2161 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
2163 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2166 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2168 switch (pdata->phy_speed) {
2170 xgbe_set_xgmii_speed(pdata);
2174 xgbe_set_gmii_2500_speed(pdata);
2178 xgbe_set_gmii_speed(pdata);

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
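	/* CSVL = 0 selects the C-VLAN (0x8100) tag type for inserted tags;
	 * VLTI = 1 sources the tag from the Tx context descriptor (field
	 * meanings per the usual DWC XGMAC definitions).
	 */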
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* These registers are always 64 bit */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;

	default:
		read_hi = false;
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}
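
/* Each set bit in the MMC Tx/Rx interrupt status registers indicates that
 * the corresponding hardware counter crossed its interrupt threshold; the
 * handlers below fold every flagged counter into the driver's running
 * statistics.
 */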

static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
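
/* With ROR set, every MMC counter clears itself when read; this is why
 * xgbe_read_mmc_stats() and the interrupt handlers above accumulate into
 * the saved statistics with "+=" rather than overwriting them.
 */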

static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 struct xgbe_channel *channel)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* Calculate the status register to read and the position within */
	if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
			 DMA_DSR0_TPS_START;
	} else {
		tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    channel->queue_index);
}
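
/* The Tx path is brought up from the inside out: DMA channels first, then
 * the MTL Tx queues, and finally the MAC transmitter, so no stage presents
 * data to a stage that is not yet ready. The disable path below runs the
 * same steps in reverse after waiting for each DMA channel to go idle.
 */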
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
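	/* 0x02 in each queue's two-bit RXQEN field enables the queue for
	 * generic/DCB traffic, as opposed to the AV enable encoding, per
	 * the DWC XGMAC register definition.
	 */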
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}

static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}
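
/* Note that, unlike xgbe_enable_tx()/xgbe_disable_tx() and their Rx
 * counterparts, the powerup/powerdown helpers above touch only the DMA
 * channels (plus the MAC Tx enable), leaving the MTL queue and MAC Rx
 * configuration intact across a power transition.
 */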

static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;
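
	/* The hardware is programmed bottom-up: the DMA engine first, then
	 * the MTL queues and FIFOs, and finally the MAC, mirroring the
	 * order in which a frame traverses the device.
	 */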

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pblx8(pdata);
	xgbe_config_tx_pbl_val(pdata);
	xgbe_config_rx_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control_threshold(pdata);
	/*TODO: Error Packet and undersized good Packet forwarding enable
	 */
	xgbe_config_dcb_tc(pdata);
	xgbe_config_dcb_pfc(pdata);
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_gmii_speed = xgbe_set_gmii_speed;
	hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
	hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For RX and TX PBL config */
	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
	hw_if->config_pblx8 = xgbe_config_pblx8;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	DBGPR("<--xgbe_init_function_ptrs\n");
}