/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/if_vlan.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include <linux/of_address.h>
33 #include <linux/phy.h>
34 #include <linux/clk.h>
37 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
38 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
39 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
40 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
41 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
42 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
43 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
44 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
45 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
46 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
47 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
48 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
49 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
50 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
51 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
52 #define MVNETA_PORT_RX_RESET 0x1cc0
53 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
54 #define MVNETA_PHY_ADDR 0x2000
55 #define MVNETA_PHY_ADDR_MASK 0x1f
56 #define MVNETA_MBUS_RETRY 0x2010
57 #define MVNETA_UNIT_INTR_CAUSE 0x2080
58 #define MVNETA_UNIT_CONTROL 0x20B0
59 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
60 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
61 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
62 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
63 #define MVNETA_BASE_ADDR_ENABLE 0x2290
64 #define MVNETA_PORT_CONFIG 0x2400
65 #define MVNETA_UNI_PROMISC_MODE BIT(0)
66 #define MVNETA_DEF_RXQ(q) ((q) << 1)
67 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
68 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
69 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
70 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
71 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
72 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
73 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
74 MVNETA_DEF_RXQ_ARP(q) | \
75 MVNETA_DEF_RXQ_TCP(q) | \
76 MVNETA_DEF_RXQ_UDP(q) | \
77 MVNETA_DEF_RXQ_BPDU(q) | \
78 MVNETA_TX_UNSET_ERR_SUM | \
79 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
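/* Illustrative example (not part of the original source): with the
 * default rxq_def = 0, every queue field above is zero, so
 * MVNETA_PORT_CONFIG_DEFL_VALUE(0) reduces to
 * MVNETA_TX_UNSET_ERR_SUM | MVNETA_RX_CSUM_WITH_PSEUDO_HDR = 0x02001000.
 */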
80 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
81 #define MVNETA_MAC_ADDR_LOW 0x2414
82 #define MVNETA_MAC_ADDR_HIGH 0x2418
83 #define MVNETA_SDMA_CONFIG 0x241c
84 #define MVNETA_SDMA_BRST_SIZE_16 4
85 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
86 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
87 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
88 #define MVNETA_DESC_SWAP BIT(6)
89 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
90 #define MVNETA_PORT_STATUS 0x2444
91 #define MVNETA_TX_IN_PRGRS BIT(1)
92 #define MVNETA_TX_FIFO_EMPTY BIT(8)
93 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
94 #define MVNETA_SERDES_CFG 0x24A0
95 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
96 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
97 #define MVNETA_TYPE_PRIO 0x24bc
98 #define MVNETA_FORCE_UNI BIT(21)
99 #define MVNETA_TXQ_CMD_1 0x24e4
100 #define MVNETA_TXQ_CMD 0x2448
101 #define MVNETA_TXQ_DISABLE_SHIFT 8
102 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
103 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
104 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
105 #define MVNETA_ACC_MODE 0x2500
106 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
107 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
108 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
109 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
111 /* Exception Interrupt Port/Queue Cause register */
113 #define MVNETA_INTR_NEW_CAUSE 0x25a0
114 #define MVNETA_INTR_NEW_MASK 0x25a4
/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
123 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
124 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
125 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
126 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
127 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
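/* Illustrative values (not part of the original source): with the
 * defaults used below, txq_number = 8 and rxq_number = 1, so
 * MVNETA_TX_INTR_MASK(8) == 0x000000ff and
 * MVNETA_RX_INTR_MASK(1) == 0x00000100.
 */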
129 #define MVNETA_INTR_OLD_CAUSE 0x25a8
130 #define MVNETA_INTR_OLD_MASK 0x25ac
132 /* Data Path Port/Queue Cause Register */
133 #define MVNETA_INTR_MISC_CAUSE 0x25b0
134 #define MVNETA_INTR_MISC_MASK 0x25b4
136 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
137 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
138 #define MVNETA_CAUSE_PTP BIT(4)
140 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
141 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
142 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
143 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
144 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
145 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
146 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
147 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
149 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
150 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
151 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
153 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
154 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
155 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
157 #define MVNETA_INTR_ENABLE 0x25b8
158 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 /* note: neta says it's 0x000000FF */
161 #define MVNETA_RXQ_CMD 0x2680
162 #define MVNETA_RXQ_DISABLE_SHIFT 8
163 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
164 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
165 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
166 #define MVNETA_GMAC_CTRL_0 0x2c00
167 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
168 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
169 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
170 #define MVNETA_GMAC_CTRL_2 0x2c08
171 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
172 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
173 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
174 #define MVNETA_GMAC2_PORT_RESET BIT(6)
175 #define MVNETA_GMAC_STATUS 0x2c10
176 #define MVNETA_GMAC_LINK_UP BIT(0)
177 #define MVNETA_GMAC_SPEED_1000 BIT(1)
178 #define MVNETA_GMAC_SPEED_100 BIT(2)
179 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
180 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
181 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
182 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
183 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
184 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
185 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
186 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
187 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
188 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
189 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
190 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
191 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
192 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
193 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
194 #define MVNETA_MIB_COUNTERS_BASE 0x3080
195 #define MVNETA_MIB_LATE_COLLISION 0x7c
196 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
197 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
198 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
199 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
200 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
201 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
202 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
203 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
204 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
205 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
206 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
207 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
208 #define MVNETA_PORT_TX_RESET 0x3cf0
209 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
210 #define MVNETA_TX_MTU 0x3e0c
211 #define MVNETA_TX_TOKEN_SIZE 0x3e14
212 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
213 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
214 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
216 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
218 /* Descriptor ring Macros */
219 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
220 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
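/* Illustrative sketch (not part of the original source): stepping
 * through a ring with MVNETA_QUEUE_NEXT_DESC(). For a queue whose
 * last_desc is 3 (a four-descriptor ring), starting at index 0 the
 * successive indices are 1, 2, 3 and then wrap back to 0, so the index
 * never leaves the ring.
 */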
222 /* Various constants */
225 #define MVNETA_TXDONE_COAL_PKTS 1
226 #define MVNETA_RX_COAL_PKTS 32
227 #define MVNETA_RX_COAL_USEC 100
/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
237 #define MVNETA_MH_SIZE 2
239 #define MVNETA_VLAN_TAG_LEN 4
241 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
242 #define MVNETA_TX_CSUM_MAX_SIZE 9800
243 #define MVNETA_ACC_MODE_EXT 1
245 /* Timeout constants */
246 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
247 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
248 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
250 #define MVNETA_TX_MTU_MAX 0x3ffff
252 /* TSO header size */
253 #define TSO_HEADER_SIZE 128
255 /* Max number of Rx descriptors */
256 #define MVNETA_MAX_RXD 128
258 /* Max number of Tx descriptors */
259 #define MVNETA_MAX_TXD 532
261 /* Max number of allowed TCP segments for software TSO */
262 #define MVNETA_MAX_TSO_SEGS 100
264 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
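/* Illustrative note (not part of the original source): the "* 2" above
 * presumably budgets one header descriptor plus at least one data
 * descriptor per TSO segment, on top of the worst-case MAX_SKB_FRAGS
 * page fragments.
 */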
266 /* descriptor aligned size */
267 #define MVNETA_DESC_ALIGNED_SIZE 32
269 #define MVNETA_RX_PKT_SIZE(mtu) \
270 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
271 ETH_HLEN + ETH_FCS_LEN, \
272 MVNETA_CPU_D_CACHE_LINE_SIZE)
274 #define IS_TSO_HEADER(txq, addr) \
275 ((addr >= txq->tso_hdrs_phys) && \
276 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
278 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
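/* Illustrative sketch (not part of the original driver): how the two
 * macros above combine for a given MTU. For the standard 1500-byte MTU,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536,
 * and MVNETA_RX_BUF_SIZE() then adds NET_SKB_PAD bytes of headroom.
 */
static inline int mvneta_example_rx_buf_size(int mtu)
{
	/* hypothetical helper, shown only to illustrate the macros above */
	return MVNETA_RX_BUF_SIZE(MVNETA_RX_PKT_SIZE(mtu));
}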
280 struct mvneta_pcpu_stats {
281 struct u64_stats_sync syncp;
290 unsigned int frag_size;
292 struct mvneta_rx_queue *rxqs;
293 struct mvneta_tx_queue *txqs;
294 struct net_device *dev;
297 struct napi_struct napi;
304 struct mvneta_pcpu_stats *stats;
306 struct mii_bus *mii_bus;
307 struct phy_device *phy_dev;
308 phy_interface_t phy_interface;
309 struct device_node *phy_node;
313 unsigned int tx_csum_limit;
314 int use_inband_status:1;
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; that layout is
 * therefore defined by the hardware design.
 */
322 #define MVNETA_TX_L3_OFF_SHIFT 0
323 #define MVNETA_TX_IP_HLEN_SHIFT 8
324 #define MVNETA_TX_L4_UDP BIT(16)
325 #define MVNETA_TX_L3_IP6 BIT(17)
326 #define MVNETA_TXD_IP_CSUM BIT(18)
327 #define MVNETA_TXD_Z_PAD BIT(19)
328 #define MVNETA_TXD_L_DESC BIT(20)
329 #define MVNETA_TXD_F_DESC BIT(21)
330 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
331 MVNETA_TXD_L_DESC | \
333 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
334 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
336 #define MVNETA_RXD_ERR_CRC 0x0
337 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
338 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
339 #define MVNETA_RXD_ERR_LEN BIT(18)
340 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
341 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
342 #define MVNETA_RXD_L3_IP4 BIT(25)
343 #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
344 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
346 #if defined(__LITTLE_ENDIAN)
347 struct mvneta_tx_desc {
348 u32 command; /* Options used by HW for packet transmitting.*/
349 u16 reserverd1; /* csum_l4 (for future use) */
350 u16 data_size; /* Data size of transmitted packet in bytes */
351 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
352 u32 reserved2; /* hw_cmd - (for future use, PMT) */
353 u32 reserved3[4]; /* Reserved - (for future use) */
356 struct mvneta_rx_desc {
357 u32 status; /* Info about received packet */
358 u16 reserved1; /* pnc_info - (for future use, PnC) */
359 u16 data_size; /* Size of received packet in bytes */
361 u32 buf_phys_addr; /* Physical address of the buffer */
362 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
364 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
365 u16 reserved3; /* prefetch_cmd, for future use */
366 u16 reserved4; /* csum_l4 - (for future use, PnC) */
368 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
369 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
372 struct mvneta_tx_desc {
373 u16 data_size; /* Data size of transmitted packet in bytes */
374 u16 reserverd1; /* csum_l4 (for future use) */
375 u32 command; /* Options used by HW for packet transmitting.*/
376 u32 reserved2; /* hw_cmd - (for future use, PMT) */
377 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
378 u32 reserved3[4]; /* Reserved - (for future use) */
381 struct mvneta_rx_desc {
382 u16 data_size; /* Size of received packet in bytes */
383 u16 reserved1; /* pnc_info - (for future use, PnC) */
384 u32 status; /* Info about received packet */
386 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
387 u32 buf_phys_addr; /* Physical address of the buffer */
389 u16 reserved4; /* csum_l4 - (for future use, PnC) */
390 u16 reserved3; /* prefetch_cmd, for future use */
391 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
393 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
394 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
398 struct mvneta_tx_queue {
399 /* Number of this TX queue, in the range 0-7 */
402 /* Number of TX DMA descriptors in the descriptor ring */
405 /* Number of currently used TX DMA descriptor in the
409 int tx_stop_threshold;
410 int tx_wake_threshold;
412 /* Array of transmitted skb */
413 struct sk_buff **tx_skb;
415 /* Index of last TX DMA descriptor that was inserted */
418 /* Index of the TX DMA descriptor to be cleaned up */
423 /* Virtual address of the TX DMA descriptors array */
424 struct mvneta_tx_desc *descs;
426 /* DMA address of the TX DMA descriptors array */
427 dma_addr_t descs_phys;
429 /* Index of the last TX DMA descriptor */
432 /* Index of the next TX DMA descriptor to process */
433 int next_desc_to_proc;
435 /* DMA buffers for TSO headers */
438 /* DMA address of TSO headers */
439 dma_addr_t tso_hdrs_phys;
442 struct mvneta_rx_queue {
443 /* rx queue number, in the range 0-7 */
446 /* num of rx descriptors in the rx descriptor ring */
449 /* counter of times when mvneta_refill() failed */
455 /* Virtual address of the RX DMA descriptors array */
456 struct mvneta_rx_desc *descs;
458 /* DMA address of the RX DMA descriptors array */
459 dma_addr_t descs_phys;
461 /* Index of the last RX DMA descriptor */
464 /* Index of the next RX DMA descriptor to process */
465 int next_desc_to_proc;
468 /* The hardware supports eight (8) rx queues, but we are only allowing
469 * the first one to be used. Therefore, let's just allocate one queue.
471 static int rxq_number = 1;
472 static int txq_number = 8;
476 static int rx_copybreak __read_mostly = 256;
478 #define MVNETA_DRIVER_NAME "mvneta"
479 #define MVNETA_DRIVER_VERSION "1.0"
481 /* Utility/helper methods */
483 /* Write helper method */
484 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
486 writel(data, pp->base + offset);
489 /* Read helper method */
490 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
492 return readl(pp->base + offset);
495 /* Increment txq get counter */
496 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
498 txq->txq_get_index++;
499 if (txq->txq_get_index == txq->size)
500 txq->txq_get_index = 0;
503 /* Increment txq put counter */
504 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
506 txq->txq_put_index++;
507 if (txq->txq_put_index == txq->size)
508 txq->txq_put_index = 0;
512 /* Clear all MIB counters */
513 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
518 /* Perform dummy reads from MIB counters */
519 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
520 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
523 /* Get System Network Statistics */
524 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
525 struct rtnl_link_stats64 *stats)
527 struct mvneta_port *pp = netdev_priv(dev);
531 for_each_possible_cpu(cpu) {
532 struct mvneta_pcpu_stats *cpu_stats;
538 cpu_stats = per_cpu_ptr(pp->stats, cpu);
540 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
541 rx_packets = cpu_stats->rx_packets;
542 rx_bytes = cpu_stats->rx_bytes;
543 tx_packets = cpu_stats->tx_packets;
544 tx_bytes = cpu_stats->tx_bytes;
545 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
547 stats->rx_packets += rx_packets;
548 stats->rx_bytes += rx_bytes;
549 stats->tx_packets += tx_packets;
550 stats->tx_bytes += tx_bytes;
553 stats->rx_errors = dev->stats.rx_errors;
554 stats->rx_dropped = dev->stats.rx_dropped;
556 stats->tx_dropped = dev->stats.tx_dropped;
561 /* Rx descriptors helper methods */
/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so a descriptor that does not
 * have both its first and last bits set is an error.
 */
568 static int mvneta_rxq_desc_is_first_last(u32 status)
570 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
571 MVNETA_RXD_FIRST_LAST_DESC;
574 /* Add number of descriptors ready to receive new packets */
575 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
576 struct mvneta_rx_queue *rxq,
579 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
582 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
583 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
584 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
585 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
586 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
589 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
590 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
593 /* Get number of RX descriptors occupied by received packets */
594 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
595 struct mvneta_rx_queue *rxq)
599 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
600 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
603 /* Update num of rx desc called upon return from rx path or
604 * from mvneta_rxq_drop_pkts().
606 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
607 struct mvneta_rx_queue *rxq,
608 int rx_done, int rx_filled)
612 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
614 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
615 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
619 /* Only 255 descriptors can be added at once */
620 while ((rx_done > 0) || (rx_filled > 0)) {
621 if (rx_done <= 0xff) {
628 if (rx_filled <= 0xff) {
629 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
632 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
635 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
639 /* Get pointer to next RX descriptor to be processed by SW */
640 static struct mvneta_rx_desc *
641 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
643 int rx_desc = rxq->next_desc_to_proc;
645 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
646 prefetch(rxq->descs + rxq->next_desc_to_proc);
647 return rxq->descs + rx_desc;
650 /* Change maximum receive size of the port. */
651 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
655 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
656 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
657 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
658 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
659 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
663 /* Set rx queue offset */
664 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
665 struct mvneta_rx_queue *rxq,
670 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
671 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
674 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
675 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
679 /* Tx descriptors helper methods */
681 /* Update HW with number of TX descriptors to be sent */
682 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
683 struct mvneta_tx_queue *txq,
/* Only 255 descriptors can be added at once; assume the caller
 * processes TX descriptors in quanta of less than 256.
 */
692 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
695 /* Get pointer to next TX descriptor to be processed (send) by HW */
696 static struct mvneta_tx_desc *
697 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
699 int tx_desc = txq->next_desc_to_proc;
701 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
702 return txq->descs + tx_desc;
705 /* Release the last allocated TX descriptor. Useful to handle DMA
706 * mapping failures in the TX path.
708 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
710 if (txq->next_desc_to_proc == 0)
711 txq->next_desc_to_proc = txq->last_desc - 1;
713 txq->next_desc_to_proc--;
716 /* Set rxq buf size */
717 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
718 struct mvneta_rx_queue *rxq,
723 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
725 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
726 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
728 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
731 /* Disable buffer management (BM) */
732 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
733 struct mvneta_rx_queue *rxq)
737 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
738 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
739 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
742 /* Start the Ethernet port RX and TX activity */
743 static void mvneta_port_up(struct mvneta_port *pp)
748 /* Enable all initialized TXs. */
749 mvneta_mib_counters_clear(pp);
751 for (queue = 0; queue < txq_number; queue++) {
752 struct mvneta_tx_queue *txq = &pp->txqs[queue];
753 if (txq->descs != NULL)
754 q_map |= (1 << queue);
756 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
758 /* Enable all initialized RXQs. */
760 for (queue = 0; queue < rxq_number; queue++) {
761 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
762 if (rxq->descs != NULL)
763 q_map |= (1 << queue);
766 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
769 /* Stop the Ethernet port activity */
770 static void mvneta_port_down(struct mvneta_port *pp)
775 /* Stop Rx port activity. Check port Rx activity. */
776 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
778 /* Issue stop command for active channels only */
780 mvreg_write(pp, MVNETA_RXQ_CMD,
781 val << MVNETA_RXQ_DISABLE_SHIFT);
783 /* Wait for all Rx activity to terminate. */
786 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
"TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
794 val = mvreg_read(pp, MVNETA_RXQ_CMD);
795 } while (val & 0xff);
797 /* Stop Tx port activity. Check port Tx activity. Issue stop
798 * command for active channels only
800 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
803 mvreg_write(pp, MVNETA_TXQ_CMD,
804 (val << MVNETA_TXQ_DISABLE_SHIFT));
806 /* Wait for all Tx activity to terminate. */
809 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
811 "TIMEOUT for TX stopped status=0x%08x\n",
817 /* Check TX Command reg that all Txqs are stopped */
818 val = mvreg_read(pp, MVNETA_TXQ_CMD);
820 } while (val & 0xff);
822 /* Double check to verify that TX FIFO is empty */
825 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
"TX FIFO empty timeout status=0x%08x\n",
833 val = mvreg_read(pp, MVNETA_PORT_STATUS);
834 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
835 (val & MVNETA_TX_IN_PRGRS));
840 /* Enable the port by setting the port enable bit of the MAC control register */
841 static void mvneta_port_enable(struct mvneta_port *pp)
846 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
847 val |= MVNETA_GMAC0_PORT_ENABLE;
848 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
/* Disable the port and wait for about 200 usec before returning */
852 static void mvneta_port_disable(struct mvneta_port *pp)
856 /* Reset the Enable bit in the Serial Control Register */
857 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
858 val &= ~MVNETA_GMAC0_PORT_ENABLE;
859 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
864 /* Multicast tables methods */
866 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
867 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
875 val = 0x1 | (queue << 1);
876 val |= (val << 24) | (val << 16) | (val << 8);
879 for (offset = 0; offset <= 0xc; offset += 4)
880 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
883 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
884 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
892 val = 0x1 | (queue << 1);
893 val |= (val << 24) | (val << 16) | (val << 8);
896 for (offset = 0; offset <= 0xfc; offset += 4)
897 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
901 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
902 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
908 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
911 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
912 val = 0x1 | (queue << 1);
913 val |= (val << 24) | (val << 16) | (val << 8);
916 for (offset = 0; offset <= 0xfc; offset += 4)
917 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
/* This method sets defaults to the NETA port:
 * - Clears interrupt Cause and Mask registers.
 * - Clears all MAC tables.
 * - Sets defaults to all registers.
 * - Resets RX and TX descriptor rings.
 *
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
929 static void mvneta_defaults_set(struct mvneta_port *pp)
935 /* Clear all Cause registers */
936 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
937 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
938 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
940 /* Mask all interrupts */
941 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
942 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
943 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
944 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
946 /* Enable MBUS Retry bit16 */
947 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
949 /* Set CPU queue access map - all CPUs have access to all RX
950 * queues and to all TX queues
952 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
953 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
954 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
955 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
957 /* Reset RX and TX DMAs */
958 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
959 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
961 /* Disable Legacy WRR, Disable EJP, Release from reset */
962 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
963 for (queue = 0; queue < txq_number; queue++) {
964 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
965 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
968 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
969 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
971 /* Set Port Acceleration Mode */
972 val = MVNETA_ACC_MODE_EXT;
973 mvreg_write(pp, MVNETA_ACC_MODE, val);
/* Update the portCfg register value according to all RxQueue types */
976 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
977 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
980 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
981 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
983 /* Build PORT_SDMA_CONFIG_REG */
986 /* Default burst size */
987 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
988 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
989 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
991 #if defined(__BIG_ENDIAN)
992 val |= MVNETA_DESC_SWAP;
995 /* Assign port SDMA configuration */
996 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
998 /* Disable PHY polling in hardware, since we're using the
999 * kernel phylib to do this.
1001 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1002 val &= ~MVNETA_PHY_POLLING_ENABLE;
1003 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1005 if (pp->use_inband_status) {
1006 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1007 val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
1008 MVNETA_GMAC_FORCE_LINK_DOWN |
1009 MVNETA_GMAC_AN_FLOW_CTRL_EN);
1010 val |= MVNETA_GMAC_INBAND_AN_ENABLE |
1011 MVNETA_GMAC_AN_SPEED_EN |
1012 MVNETA_GMAC_AN_DUPLEX_EN;
1013 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1014 val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
1015 val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
1016 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
1018 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1019 val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
1020 MVNETA_GMAC_AN_SPEED_EN |
1021 MVNETA_GMAC_AN_DUPLEX_EN);
1022 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1025 mvneta_set_ucast_table(pp, -1);
1026 mvneta_set_special_mcast_table(pp, -1);
1027 mvneta_set_other_mcast_table(pp, -1);
1029 /* Set port interrupt enable register - default enable all */
1030 mvreg_write(pp, MVNETA_INTR_ENABLE,
1031 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1032 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1035 /* Set max sizes for tx queues */
1036 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1042 mtu = max_tx_size * 8;
1043 if (mtu > MVNETA_TX_MTU_MAX)
1044 mtu = MVNETA_TX_MTU_MAX;
1047 val = mvreg_read(pp, MVNETA_TX_MTU);
1048 val &= ~MVNETA_TX_MTU_MAX;
1050 mvreg_write(pp, MVNETA_TX_MTU, val);
/* The TX token size and all TXQ token sizes must be larger than the MTU */
1053 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1055 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1058 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1060 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1062 for (queue = 0; queue < txq_number; queue++) {
1063 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1065 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1068 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1070 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1075 /* Set unicast address */
1076 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1079 unsigned int unicast_reg;
1080 unsigned int tbl_offset;
1081 unsigned int reg_offset;
1083 /* Locate the Unicast table entry */
1084 last_nibble = (0xf & last_nibble);
1086 /* offset from unicast tbl base */
1087 tbl_offset = (last_nibble / 4) * 4;
1089 /* offset within the above reg */
1090 reg_offset = last_nibble % 4;
1092 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1095 /* Clear accepts frame bit at specified unicast DA tbl entry */
1096 unicast_reg &= ~(0xff << (8 * reg_offset));
1098 unicast_reg &= ~(0xff << (8 * reg_offset));
1099 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1102 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1105 /* Set mac address */
1106 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1113 mac_l = (addr[4] << 8) | (addr[5]);
1114 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1115 (addr[2] << 8) | (addr[3] << 0);
1117 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1118 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1121 /* Accept frames of this address */
1122 mvneta_set_ucast_addr(pp, addr[5], queue);
/* Set the number of packets that will be received before an RX
 * interrupt is generated by the HW.
 */
1128 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1129 struct mvneta_rx_queue *rxq, u32 value)
1131 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1132 value | MVNETA_RXQ_NON_OCCUPIED(0));
1133 rxq->pkts_coal = value;
/* Set the time delay in usec before an RX interrupt is generated by
 * the HW.
 */
1139 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1140 struct mvneta_rx_queue *rxq, u32 value)
1143 unsigned long clk_rate;
1145 clk_rate = clk_get_rate(pp->clk);
1146 val = (clk_rate / 1000000) * value;
1148 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1149 rxq->time_coal = value;
1152 /* Set threshold for TX_DONE pkts coalescing */
1153 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1154 struct mvneta_tx_queue *txq, u32 value)
1158 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1160 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1161 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1163 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1165 txq->done_pkts_coal = value;
1168 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1169 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1170 u32 phys_addr, u32 cookie)
1172 rx_desc->buf_cookie = cookie;
1173 rx_desc->buf_phys_addr = phys_addr;
1176 /* Decrement sent descriptors counter */
1177 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1178 struct mvneta_tx_queue *txq,
1183 /* Only 255 TX descriptors can be updated at once */
1184 while (sent_desc > 0xff) {
1185 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1186 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1187 sent_desc = sent_desc - 0xff;
1190 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1191 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1194 /* Get number of TX descriptors already sent by HW */
1195 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1196 struct mvneta_tx_queue *txq)
1201 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1202 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1203 MVNETA_TXQ_SENT_DESC_SHIFT;
1208 /* Get number of sent descriptors and decrement counter.
1209 * The number of sent descriptors is returned.
1211 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1212 struct mvneta_tx_queue *txq)
1216 /* Get number of sent descriptors */
1217 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1219 /* Decrement sent descriptors counter */
1221 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1226 /* Set TXQ descriptors fields relevant for CSUM calculation */
1227 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1228 int ip_hdr_len, int l4_proto)
1232 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1233 * G_L4_chk, L4_type; required only for checksum
1236 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1237 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1239 if (l3_proto == htons(ETH_P_IP))
1240 command |= MVNETA_TXD_IP_CSUM;
1242 command |= MVNETA_TX_L3_IP6;
1244 if (l4_proto == IPPROTO_TCP)
1245 command |= MVNETA_TX_L4_CSUM_FULL;
1246 else if (l4_proto == IPPROTO_UDP)
1247 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1249 command |= MVNETA_TX_L4_CSUM_NOT;
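/* Illustrative example (not part of the original source): for an
 * untagged IPv4/TCP frame, mvneta_skb_tx_csum() below passes
 * l3_offs = 14 (ETH_HLEN) and ip_hdr_len = 5 (IHL in 32-bit words), so
 * the command built here is
 * 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL = 0x4004050e.
 */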
1255 /* Display more error info */
1256 static void mvneta_rx_error(struct mvneta_port *pp,
1257 struct mvneta_rx_desc *rx_desc)
1259 u32 status = rx_desc->status;
1261 if (!mvneta_rxq_desc_is_first_last(status)) {
1263 "bad rx status %08x (buffer oversize), size=%d\n",
1264 status, rx_desc->data_size);
1268 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1269 case MVNETA_RXD_ERR_CRC:
1270 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1271 status, rx_desc->data_size);
1273 case MVNETA_RXD_ERR_OVERRUN:
1274 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1275 status, rx_desc->data_size);
1277 case MVNETA_RXD_ERR_LEN:
1278 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1279 status, rx_desc->data_size);
1281 case MVNETA_RXD_ERR_RESOURCE:
1282 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1283 status, rx_desc->data_size);
1288 /* Handle RX checksum offload based on the descriptor's status */
1289 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1290 struct sk_buff *skb)
1292 if ((status & MVNETA_RXD_L3_IP4) &&
1293 (status & MVNETA_RXD_L4_CSUM_OK)) {
1295 skb->ip_summed = CHECKSUM_UNNECESSARY;
1299 skb->ip_summed = CHECKSUM_NONE;
/* Return a tx queue pointer (find last set bit) according to <cause>
 * returned from the tx_done reg. <cause> must not be null. The return
 * value is always a valid queue for matching the first one found in
 * <cause>.
 */
1306 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1309 int queue = fls(cause) - 1;
1311 return &pp->txqs[queue];
1314 /* Free tx queue skbuffs */
1315 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1316 struct mvneta_tx_queue *txq, int num)
1320 for (i = 0; i < num; i++) {
1321 struct mvneta_tx_desc *tx_desc = txq->descs +
1323 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1325 mvneta_txq_inc_get(txq);
1327 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1328 dma_unmap_single(pp->dev->dev.parent,
1329 tx_desc->buf_phys_addr,
1330 tx_desc->data_size, DMA_TO_DEVICE);
1333 dev_kfree_skb_any(skb);
1337 /* Handle end of transmission */
1338 static void mvneta_txq_done(struct mvneta_port *pp,
1339 struct mvneta_tx_queue *txq)
1341 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1344 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1348 mvneta_txq_bufs_free(pp, txq, tx_done);
1350 txq->count -= tx_done;
1352 if (netif_tx_queue_stopped(nq)) {
1353 if (txq->count <= txq->tx_wake_threshold)
1354 netif_tx_wake_queue(nq);
1358 static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1360 if (likely(pp->frag_size <= PAGE_SIZE))
1361 return netdev_alloc_frag(pp->frag_size);
1363 return kmalloc(pp->frag_size, GFP_ATOMIC);
1366 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1368 if (likely(pp->frag_size <= PAGE_SIZE))
1369 skb_free_frag(data);
1374 /* Refill processing */
1375 static int mvneta_rx_refill(struct mvneta_port *pp,
1376 struct mvneta_rx_desc *rx_desc)
1379 dma_addr_t phys_addr;
1382 data = mvneta_frag_alloc(pp);
1386 phys_addr = dma_map_single(pp->dev->dev.parent, data,
1387 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1389 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1390 mvneta_frag_free(pp, data);
1394 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
1398 /* Handle tx checksum */
1399 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1401 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1403 __be16 l3_proto = vlan_get_protocol(skb);
1406 if (l3_proto == htons(ETH_P_IP)) {
1407 struct iphdr *ip4h = ip_hdr(skb);
1409 /* Calculate IPv4 checksum and L4 checksum */
1410 ip_hdr_len = ip4h->ihl;
1411 l4_proto = ip4h->protocol;
1412 } else if (l3_proto == htons(ETH_P_IPV6)) {
1413 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1415 /* Read l4_protocol from one of IPv6 extra headers */
1416 if (skb_network_header_len(skb) > 0)
1417 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1418 l4_proto = ip6h->nexthdr;
1420 return MVNETA_TX_L4_CSUM_NOT;
1422 return mvneta_txq_desc_csum(skb_network_offset(skb),
1423 l3_proto, ip_hdr_len, l4_proto);
1426 return MVNETA_TX_L4_CSUM_NOT;
/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value.
 */
1432 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1435 int queue = fls(cause >> 8) - 1;
1437 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
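/* Illustrative example (not part of the original source): a cause value
 * of 0x0300 means RXQs 0 and 1 both have packets pending;
 * fls(0x0300 >> 8) - 1 == 1, so mvneta_rx_policy() serves RXQ 1 first.
 */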
1440 /* Drop packets received by the RXQ and free buffers */
1441 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1442 struct mvneta_rx_queue *rxq)
1446 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1447 for (i = 0; i < rxq->size; i++) {
1448 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1449 void *data = (void *)rx_desc->buf_cookie;
1451 mvneta_frag_free(pp, data);
1452 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1453 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1457 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1460 /* Main rx processing */
1461 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1462 struct mvneta_rx_queue *rxq)
1464 struct net_device *dev = pp->dev;
1469 /* Get number of received packets */
1470 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1472 if (rx_todo > rx_done)
1477 /* Fairness NAPI loop */
1478 while (rx_done < rx_todo) {
1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1480 struct sk_buff *skb;
1481 unsigned char *data;
1482 dma_addr_t phys_addr;
1487 rx_status = rx_desc->status;
1488 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1489 data = (unsigned char *)rx_desc->buf_cookie;
1490 phys_addr = rx_desc->buf_phys_addr;
1492 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1493 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1495 dev->stats.rx_errors++;
1496 mvneta_rx_error(pp, rx_desc);
1497 /* leave the descriptor untouched */
1501 if (rx_bytes <= rx_copybreak) {
1502 /* better copy a small frame and not unmap the DMA region */
1503 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1505 goto err_drop_frame;
1507 dma_sync_single_range_for_cpu(dev->dev.parent,
1508 rx_desc->buf_phys_addr,
1509 MVNETA_MH_SIZE + NET_SKB_PAD,
1512 memcpy(skb_put(skb, rx_bytes),
1513 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1516 skb->protocol = eth_type_trans(skb, dev);
1517 mvneta_rx_csum(pp, rx_status, skb);
1518 napi_gro_receive(&pp->napi, skb);
1521 rcvd_bytes += rx_bytes;
1523 /* leave the descriptor and buffer untouched */
1527 /* Refill processing */
1528 err = mvneta_rx_refill(pp, rx_desc);
1530 netdev_err(dev, "Linux processing - Can't refill\n");
1532 goto err_drop_frame;
1535 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1537 goto err_drop_frame;
1539 dma_unmap_single(dev->dev.parent, phys_addr,
1540 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1543 rcvd_bytes += rx_bytes;
1545 /* Linux processing */
1546 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
1547 skb_put(skb, rx_bytes);
1549 skb->protocol = eth_type_trans(skb, dev);
1551 mvneta_rx_csum(pp, rx_status, skb);
1553 napi_gro_receive(&pp->napi, skb);
1557 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1559 u64_stats_update_begin(&stats->syncp);
1560 stats->rx_packets += rcvd_pkts;
1561 stats->rx_bytes += rcvd_bytes;
1562 u64_stats_update_end(&stats->syncp);
1565 /* Update rxq management counters */
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1572 mvneta_tso_put_hdr(struct sk_buff *skb,
1573 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1575 struct mvneta_tx_desc *tx_desc;
1576 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1578 txq->tx_skb[txq->txq_put_index] = NULL;
1579 tx_desc = mvneta_txq_next_desc_get(txq);
1580 tx_desc->data_size = hdr_len;
1581 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1582 tx_desc->command |= MVNETA_TXD_F_DESC;
1583 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1584 txq->txq_put_index * TSO_HEADER_SIZE;
1585 mvneta_txq_inc_put(txq);
1589 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1590 struct sk_buff *skb, char *data, int size,
1591 bool last_tcp, bool is_last)
1593 struct mvneta_tx_desc *tx_desc;
1595 tx_desc = mvneta_txq_next_desc_get(txq);
1596 tx_desc->data_size = size;
1597 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1598 size, DMA_TO_DEVICE);
1599 if (unlikely(dma_mapping_error(dev->dev.parent,
1600 tx_desc->buf_phys_addr))) {
1601 mvneta_txq_desc_put(txq);
1605 tx_desc->command = 0;
1606 txq->tx_skb[txq->txq_put_index] = NULL;
1609 /* last descriptor in the TCP packet */
1610 tx_desc->command = MVNETA_TXD_L_DESC;
1612 /* last descriptor in SKB */
1614 txq->tx_skb[txq->txq_put_index] = skb;
1616 mvneta_txq_inc_put(txq);
1620 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1621 struct mvneta_tx_queue *txq)
1623 int total_len, data_left;
1625 struct mvneta_port *pp = netdev_priv(dev);
1627 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1630 /* Count needed descriptors */
1631 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1634 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1635 pr_info("*** Is this even possible???!?!?\n");
1639 /* Initialize the TSO handler, and prepare the first payload */
1640 tso_start(skb, &tso);
1642 total_len = skb->len - hdr_len;
1643 while (total_len > 0) {
1646 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1647 total_len -= data_left;
1650 /* prepare packet headers: MAC + IP + TCP */
1651 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1652 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1654 mvneta_tso_put_hdr(skb, pp, txq);
1656 while (data_left > 0) {
1660 size = min_t(int, tso.size, data_left);
1662 if (mvneta_tso_put_data(dev, txq, skb,
1669 tso_build_data(skb, &tso, size);
/* Release all used data descriptors; header descriptors must not
 * be unmapped.
 */
1679 for (i = desc_count - 1; i >= 0; i--) {
1680 struct mvneta_tx_desc *tx_desc = txq->descs + i;
1681 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1682 dma_unmap_single(pp->dev->dev.parent,
1683 tx_desc->buf_phys_addr,
1686 mvneta_txq_desc_put(txq);
1691 /* Handle tx fragmentation processing */
1692 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1693 struct mvneta_tx_queue *txq)
1695 struct mvneta_tx_desc *tx_desc;
1696 int i, nr_frags = skb_shinfo(skb)->nr_frags;
1698 for (i = 0; i < nr_frags; i++) {
1699 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1700 void *addr = page_address(frag->page.p) + frag->page_offset;
1702 tx_desc = mvneta_txq_next_desc_get(txq);
1703 tx_desc->data_size = frag->size;
1705 tx_desc->buf_phys_addr =
1706 dma_map_single(pp->dev->dev.parent, addr,
1707 tx_desc->data_size, DMA_TO_DEVICE);
1709 if (dma_mapping_error(pp->dev->dev.parent,
1710 tx_desc->buf_phys_addr)) {
1711 mvneta_txq_desc_put(txq);
1715 if (i == nr_frags - 1) {
1716 /* Last descriptor */
1717 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1718 txq->tx_skb[txq->txq_put_index] = skb;
1720 /* Descriptor in the middle: Not First, Not Last */
1721 tx_desc->command = 0;
1722 txq->tx_skb[txq->txq_put_index] = NULL;
1724 mvneta_txq_inc_put(txq);
1730 /* Release all descriptors that were used to map fragments of
1731 * this packet, as well as the corresponding DMA mappings
1733 for (i = i - 1; i >= 0; i--) {
1734 tx_desc = txq->descs + i;
1735 dma_unmap_single(pp->dev->dev.parent,
1736 tx_desc->buf_phys_addr,
1739 mvneta_txq_desc_put(txq);
1745 /* Main tx processing */
1746 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1748 struct mvneta_port *pp = netdev_priv(dev);
1749 u16 txq_id = skb_get_queue_mapping(skb);
1750 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1751 struct mvneta_tx_desc *tx_desc;
1756 if (!netif_running(dev))
1759 if (skb_is_gso(skb)) {
1760 frags = mvneta_tx_tso(skb, dev, txq);
1764 frags = skb_shinfo(skb)->nr_frags + 1;
1766 /* Get a descriptor for the first part of the packet */
1767 tx_desc = mvneta_txq_next_desc_get(txq);
1769 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1771 tx_desc->data_size = skb_headlen(skb);
1773 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1776 if (unlikely(dma_mapping_error(dev->dev.parent,
1777 tx_desc->buf_phys_addr))) {
1778 mvneta_txq_desc_put(txq);
1784 /* First and Last descriptor */
1785 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1786 tx_desc->command = tx_cmd;
1787 txq->tx_skb[txq->txq_put_index] = skb;
1788 mvneta_txq_inc_put(txq);
1790 /* First but not Last */
1791 tx_cmd |= MVNETA_TXD_F_DESC;
1792 txq->tx_skb[txq->txq_put_index] = NULL;
1793 mvneta_txq_inc_put(txq);
1794 tx_desc->command = tx_cmd;
1795 /* Continue with other skb fragments */
1796 if (mvneta_tx_frag_process(pp, skb, txq)) {
1797 dma_unmap_single(dev->dev.parent,
1798 tx_desc->buf_phys_addr,
1801 mvneta_txq_desc_put(txq);
1809 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1810 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1812 txq->count += frags;
1813 mvneta_txq_pend_desc_add(pp, txq, frags);
1815 if (txq->count >= txq->tx_stop_threshold)
1816 netif_tx_stop_queue(nq);
1818 u64_stats_update_begin(&stats->syncp);
1819 stats->tx_packets++;
1820 stats->tx_bytes += len;
1821 u64_stats_update_end(&stats->syncp);
1823 dev->stats.tx_dropped++;
1824 dev_kfree_skb_any(skb);
1827 return NETDEV_TX_OK;
1831 /* Free tx resources, when resetting a port */
1832 static void mvneta_txq_done_force(struct mvneta_port *pp,
1833 struct mvneta_tx_queue *txq)
1836 int tx_done = txq->count;
1838 mvneta_txq_bufs_free(pp, txq, tx_done);
1842 txq->txq_put_index = 0;
1843 txq->txq_get_index = 0;
1846 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
1847 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
1849 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
1851 struct mvneta_tx_queue *txq;
1852 struct netdev_queue *nq;
1854 while (cause_tx_done) {
1855 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1857 nq = netdev_get_tx_queue(pp->dev, txq->id);
1858 __netif_tx_lock(nq, smp_processor_id());
1861 mvneta_txq_done(pp, txq);
1863 __netif_tx_unlock(nq);
1864 cause_tx_done &= ~((1 << txq->id));
/* Compute crc8 of the specified address, using a unique algorithm
 * defined by the hw spec, different from the generic crc8 algorithm.
 */
1871 static int mvneta_addr_crc(unsigned char *addr)
1876 for (i = 0; i < ETH_ALEN; i++) {
1879 crc = (crc ^ addr[i]) << 8;
1880 for (j = 7; j >= 0; j--) {
1881 if (crc & (0x100 << j))
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC addresses
 * of the form 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the
 * appropriate entry in the Special Multicast Table.
 */
1896 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1897 unsigned char last_byte,
1900 unsigned int smc_table_reg;
1901 unsigned int tbl_offset;
1902 unsigned int reg_offset;
1904 /* Register offset from SMC table base */
1905 tbl_offset = (last_byte / 4);
1906 /* Entry offset within the above reg */
1907 reg_offset = last_byte % 4;
1909 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1913 smc_table_reg &= ~(0xff << (8 * reg_offset));
1915 smc_table_reg &= ~(0xff << (8 * reg_offset));
1916 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1919 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 value is used as an index to the Other Multicast Table
 * entries in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and sets
 * the appropriate entry in the Other Multicast Table according to the
 * specified CRC-8 value.
 */
1931 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1935 unsigned int omc_table_reg;
1936 unsigned int tbl_offset;
1937 unsigned int reg_offset;
1939 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1940 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1942 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1945 /* Clear accepts frame bit at specified Other DA table entry */
1946 omc_table_reg &= ~(0xff << (8 * reg_offset));
1948 omc_table_reg &= ~(0xff << (8 * reg_offset));
1949 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1952 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special
 *       Multicast Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8
 *       value is used as an index to the Other Multicast Table entries
 *       in the DA-Filter table.
 */
1964 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1967 unsigned char crc_result = 0;
1969 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1970 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1974 crc_result = mvneta_addr_crc(p_addr);
1976 if (pp->mcast_count[crc_result] == 0) {
1977 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1982 pp->mcast_count[crc_result]--;
1983 if (pp->mcast_count[crc_result] != 0) {
1984 netdev_info(pp->dev,
1985 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1986 pp->mcast_count[crc_result], crc_result);
1990 pp->mcast_count[crc_result]++;
1992 mvneta_set_other_mcast_addr(pp, crc_result, queue);
/* Configure the filtering mode of the Ethernet port */
1998 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2001 u32 port_cfg_reg, val;
2003 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2005 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2007 /* Set / Clear UPM bit in port configuration register */
2009 /* Accept all Unicast addresses */
2010 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2011 val |= MVNETA_FORCE_UNI;
2012 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2013 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2015 /* Reject all Unicast addresses */
2016 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2017 val &= ~MVNETA_FORCE_UNI;
2020 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2021 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2024 /* register unicast and multicast addresses */
2025 static void mvneta_set_rx_mode(struct net_device *dev)
2027 struct mvneta_port *pp = netdev_priv(dev);
2028 struct netdev_hw_addr *ha;
2030 if (dev->flags & IFF_PROMISC) {
2031 /* Accept all: Multicast + Unicast */
2032 mvneta_rx_unicast_promisc_set(pp, 1);
2033 mvneta_set_ucast_table(pp, rxq_def);
2034 mvneta_set_special_mcast_table(pp, rxq_def);
2035 mvneta_set_other_mcast_table(pp, rxq_def);
2037 /* Accept single Unicast */
2038 mvneta_rx_unicast_promisc_set(pp, 0);
2039 mvneta_set_ucast_table(pp, -1);
2040 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2042 if (dev->flags & IFF_ALLMULTI) {
2043 /* Accept all multicast */
2044 mvneta_set_special_mcast_table(pp, rxq_def);
2045 mvneta_set_other_mcast_table(pp, rxq_def);
2047 /* Accept only initialized multicast */
2048 mvneta_set_special_mcast_table(pp, -1);
2049 mvneta_set_other_mcast_table(pp, -1);
2051 if (!netdev_mc_empty(dev)) {
2052 netdev_for_each_mc_addr(ha, dev) {
2053 mvneta_mcast_addr_set(pp, ha->addr,
2061 /* Interrupt handling - the callback for request_irq() */
2062 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2064 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2066 /* Mask all interrupts */
2067 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2069 napi_schedule(&pp->napi);
2074 static int mvneta_fixed_link_update(struct mvneta_port *pp,
2075 struct phy_device *phy)
2077 struct fixed_phy_status status;
2078 struct fixed_phy_status changed = {};
2079 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2081 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2082 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2083 status.speed = SPEED_1000;
2084 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2085 status.speed = SPEED_100;
2087 status.speed = SPEED_10;
2088 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2092 fixed_phy_update_state(phy, &status, &changed);
/* NAPI handler.
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
2103 static int mvneta_poll(struct napi_struct *napi, int budget)
2107 unsigned long flags;
2108 struct mvneta_port *pp = netdev_priv(napi->dev);
2110 if (!netif_running(pp->dev)) {
2111 napi_complete(napi);
2115 /* Read cause register */
2116 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2117 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2118 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2120 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2121 if (pp->use_inband_status && (cause_misc &
2122 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2123 MVNETA_CAUSE_LINK_CHANGE |
2124 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2125 mvneta_fixed_link_update(pp, pp->phy_dev);
2129 /* Release Tx descriptors */
2130 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2131 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2132 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2135 /* For the case where the last mvneta_poll did not process all RX packets */
2138 cause_rx_tx |= pp->cause_rx_tx;
2139 if (rxq_number > 1) {
2140 while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
2142 struct mvneta_rx_queue *rxq;
2143 /* get rx queue number from cause_rx_tx */
2144 rxq = mvneta_rx_policy(pp, cause_rx_tx);
2148 /* process the packet in that rx queue */
2149 count = mvneta_rx(pp, budget, rxq);
2153 /* Clear this rx queue's bit in the cause
2154 * rx tx register, so that the next
2155 * iteration will find the next rx queue
2156 * on which packets were received.
2157 */
2159 cause_rx_tx &= ~((1 << rxq->id) << 8);
2163 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
2169 napi_complete(napi);
2170 local_irq_save(flags);
2171 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2172 MVNETA_RX_INTR_MASK(rxq_number) |
2173 MVNETA_TX_INTR_MASK(txq_number) |
2174 MVNETA_MISCINTR_INTR_MASK);
2175 local_irq_restore(flags);
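/* Remember any cause bits that could not be serviced within the budget,
 * so that the next poll round picks up where this one left off.
 */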
2178 pp->cause_rx_tx = cause_rx_tx;
2182 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2183 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2188 for (i = 0; i < num; i++) {
2189 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2190 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2191 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2192 __func__, rxq->id, i, num);
2197 /* Add this number of RX descriptors as non occupied (ready to receive packets) */
2200 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2205 /* Free all packets pending transmit from all TXQs and reset TX port */
2206 static void mvneta_tx_reset(struct mvneta_port *pp)
2210 /* free the skb's in the tx ring */
2211 for (queue = 0; queue < txq_number; queue++)
2212 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2214 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2215 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2218 static void mvneta_rx_reset(struct mvneta_port *pp)
2220 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2221 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2224 /* Rx/Tx queue initialization/cleanup methods */
2226 /* Create a specified RX queue */
2227 static int mvneta_rxq_init(struct mvneta_port *pp,
2228 struct mvneta_rx_queue *rxq)
2231 rxq->size = pp->rx_ring_size;
2233 /* Allocate memory for RX descriptors */
2234 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2235 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2236 &rxq->descs_phys, GFP_KERNEL);
2237 if (rxq->descs == NULL)
2240 BUG_ON(rxq->descs !=
2241 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
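/* Note: dma_alloc_coherent() is expected to return memory that is at
 * least page aligned, so this cache-line alignment check should never
 * trigger in practice.
 */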
2243 rxq->last_desc = rxq->size - 1;
2245 /* Set Rx descriptors queue starting address */
2246 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2247 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2250 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2252 /* Set coalescing pkts and time */
2253 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2254 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2256 /* Fill RXQ with buffers from RX pool */
2257 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2258 mvneta_rxq_bm_disable(pp, rxq);
2259 mvneta_rxq_fill(pp, rxq, rxq->size);
2264 /* Cleanup Rx queue */
2265 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2266 struct mvneta_rx_queue *rxq)
2268 mvneta_rxq_drop_pkts(pp, rxq);
2271 dma_free_coherent(pp->dev->dev.parent,
2272 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2278 rxq->next_desc_to_proc = 0;
2279 rxq->descs_phys = 0;
2282 /* Create and initialize a tx queue */
2283 static int mvneta_txq_init(struct mvneta_port *pp,
2284 struct mvneta_tx_queue *txq)
2286 txq->size = pp->tx_ring_size;
2288 /* A queue must always have room for at least one skb.
2289 * Therefore, stop the queue when the number of free entries reaches
2290 * the maximum number of descriptors per skb.
2292 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2293 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
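/* Waking at half of the stop threshold adds some hysteresis, so the
 * queue does not flip between stopped and running on every completed
 * descriptor.
 */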
2296 /* Allocate memory for TX descriptors */
2297 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2298 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2299 &txq->descs_phys, GFP_KERNEL);
2300 if (txq->descs == NULL)
2303 /* Make sure descriptor address is cache line size aligned */
2304 BUG_ON(txq->descs !=
2305 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2307 txq->last_desc = txq->size - 1;
2309 /* Set maximum bandwidth for enabled TXQs */
2310 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2311 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2313 /* Set Tx descriptors queue starting address */
2314 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2315 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2317 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2318 if (txq->tx_skb == NULL) {
2319 dma_free_coherent(pp->dev->dev.parent,
2320 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2321 txq->descs, txq->descs_phys);
2325 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2326 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2327 txq->size * TSO_HEADER_SIZE,
2328 &txq->tso_hdrs_phys, GFP_KERNEL);
2329 if (txq->tso_hdrs == NULL) {
2331 dma_free_coherent(pp->dev->dev.parent,
2332 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2333 txq->descs, txq->descs_phys);
2336 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2341 /* Free all resources allocated for a TX queue */
2342 static void mvneta_txq_deinit(struct mvneta_port *pp,
2343 struct mvneta_tx_queue *txq)
2348 dma_free_coherent(pp->dev->dev.parent,
2349 txq->size * TSO_HEADER_SIZE,
2350 txq->tso_hdrs, txq->tso_hdrs_phys);
2352 dma_free_coherent(pp->dev->dev.parent,
2353 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2354 txq->descs, txq->descs_phys);
2358 txq->next_desc_to_proc = 0;
2359 txq->descs_phys = 0;
2361 /* Set minimum bandwidth for disabled TXQs */
2362 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2363 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2365 /* Set Tx descriptors queue starting address and size */
2366 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2367 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2370 /* Cleanup all Tx queues */
2371 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2375 for (queue = 0; queue < txq_number; queue++)
2376 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2379 /* Cleanup all Rx queues */
2380 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2384 for (queue = 0; queue < rxq_number; queue++)
2385 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2389 /* Init all Rx queues */
2390 static int mvneta_setup_rxqs(struct mvneta_port *pp)
2394 for (queue = 0; queue < rxq_number; queue++) {
2395 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2397 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2399 mvneta_cleanup_rxqs(pp);
2407 /* Init all tx queues */
2408 static int mvneta_setup_txqs(struct mvneta_port *pp)
2412 for (queue = 0; queue < txq_number; queue++) {
2413 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2415 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2417 mvneta_cleanup_txqs(pp);
2425 static void mvneta_start_dev(struct mvneta_port *pp)
2427 mvneta_max_rx_size_set(pp, pp->pkt_size);
2428 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2430 /* start the Rx/Tx activity */
2431 mvneta_port_enable(pp);
2433 /* Enable polling on the port */
2434 napi_enable(&pp->napi);
2436 /* Unmask interrupts */
2437 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2438 MVNETA_RX_INTR_MASK(rxq_number) |
2439 MVNETA_TX_INTR_MASK(txq_number) |
2440 MVNETA_MISCINTR_INTR_MASK);
2441 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2442 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2443 MVNETA_CAUSE_LINK_CHANGE |
2444 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2446 phy_start(pp->phy_dev);
2447 netif_tx_start_all_queues(pp->dev);
2450 static void mvneta_stop_dev(struct mvneta_port *pp)
2452 phy_stop(pp->phy_dev);
2454 napi_disable(&pp->napi);
2456 netif_carrier_off(pp->dev);
2458 mvneta_port_down(pp);
2459 netif_tx_stop_all_queues(pp->dev);
2461 /* Stop the port activity */
2462 mvneta_port_disable(pp);
2464 /* Clear all ethernet port interrupts */
2465 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2466 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2468 /* Mask all ethernet port interrupts */
2469 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2470 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2471 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2473 mvneta_tx_reset(pp);
2474 mvneta_rx_reset(pp);
2477 /* Return positive if MTU is valid */
2478 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2481 netdev_err(dev, "cannot change mtu to less than 68\n");
2485 /* 9676 == 9700 - 20 and rounding to 8 */
2487 netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
2491 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2492 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2493 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2494 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
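/* The RX packet size derived from the MTU has to be a multiple of 8,
 * hence the rounding above; the adjusted MTU is what gets returned to
 * the caller.
 */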
2500 /* Change the device mtu */
2501 static int mvneta_change_mtu(struct net_device *dev, int mtu)
2503 struct mvneta_port *pp = netdev_priv(dev);
2506 mtu = mvneta_check_mtu_valid(dev, mtu);
2512 if (!netif_running(dev)) {
2513 netdev_update_features(dev);
2517 /* The interface is running, so we have to force a
2518 * reallocation of the queues
2520 mvneta_stop_dev(pp);
2522 mvneta_cleanup_txqs(pp);
2523 mvneta_cleanup_rxqs(pp);
2525 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2526 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2527 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
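/* frag_size covers the RX buffer itself plus room for the
 * skb_shared_info that is placed at the end of the fragment when the
 * buffer is later turned into an skb.
 */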
2529 ret = mvneta_setup_rxqs(pp);
2531 netdev_err(dev, "unable to setup rxqs after MTU change\n");
2535 ret = mvneta_setup_txqs(pp);
2537 netdev_err(dev, "unable to setup txqs after MTU change\n");
2541 mvneta_start_dev(pp);
2544 netdev_update_features(dev);
2549 static netdev_features_t mvneta_fix_features(struct net_device *dev,
2550 netdev_features_t features)
2552 struct mvneta_port *pp = netdev_priv(dev);
2554 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2555 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2557 "Disable IP checksum for MTU greater than %dB\n",
2564 /* Get mac address */
2565 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2567 u32 mac_addr_l, mac_addr_h;
2569 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2570 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
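/* The first four bytes of the MAC address live in the HIGH register,
 * the last two in the LOW register.
 */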
2571 addr[0] = (mac_addr_h >> 24) & 0xFF;
2572 addr[1] = (mac_addr_h >> 16) & 0xFF;
2573 addr[2] = (mac_addr_h >> 8) & 0xFF;
2574 addr[3] = mac_addr_h & 0xFF;
2575 addr[4] = (mac_addr_l >> 8) & 0xFF;
2576 addr[5] = mac_addr_l & 0xFF;
2579 /* Handle setting mac address */
2580 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2582 struct mvneta_port *pp = netdev_priv(dev);
2583 struct sockaddr *sockaddr = addr;
2586 ret = eth_prepare_mac_addr_change(dev, addr);
2589 /* Remove previous address table entry */
2590 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2592 /* Set new addr in hw */
2593 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2595 eth_commit_mac_addr_change(dev, addr);
2599 static void mvneta_adjust_link(struct net_device *ndev)
2601 struct mvneta_port *pp = netdev_priv(ndev);
2602 struct phy_device *phydev = pp->phy_dev;
2603 int status_change = 0;
2606 if ((pp->speed != phydev->speed) ||
2607 (pp->duplex != phydev->duplex)) {
2610 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2611 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2612 MVNETA_GMAC_CONFIG_GMII_SPEED |
2613 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2616 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2618 if (phydev->speed == SPEED_1000)
2619 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2620 else if (phydev->speed == SPEED_100)
2621 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2623 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2625 pp->duplex = phydev->duplex;
2626 pp->speed = phydev->speed;
2630 if (phydev->link != pp->link) {
2631 if (!phydev->link) {
2636 pp->link = phydev->link;
2640 if (status_change) {
2642 if (!pp->use_inband_status) {
2643 u32 val = mvreg_read(pp,
2644 MVNETA_GMAC_AUTONEG_CONFIG);
2645 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2646 val |= MVNETA_GMAC_FORCE_LINK_PASS;
2647 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2652 if (!pp->use_inband_status) {
2653 u32 val = mvreg_read(pp,
2654 MVNETA_GMAC_AUTONEG_CONFIG);
2655 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2656 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2657 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2660 mvneta_port_down(pp);
2662 phy_print_status(phydev);
2666 static int mvneta_mdio_probe(struct mvneta_port *pp)
2668 struct phy_device *phy_dev;
2670 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2673 netdev_err(pp->dev, "could not find the PHY\n");
2677 phy_dev->supported &= PHY_GBIT_FEATURES;
2678 phy_dev->advertising = phy_dev->supported;
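/* The port itself tops out at gigabit speeds, so mask out any PHY
 * features beyond that and advertise only what can actually be used.
 */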
2680 pp->phy_dev = phy_dev;
2688 static void mvneta_mdio_remove(struct mvneta_port *pp)
2690 phy_disconnect(pp->phy_dev);
2694 static int mvneta_open(struct net_device *dev)
2696 struct mvneta_port *pp = netdev_priv(dev);
2699 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2700 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2701 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2703 ret = mvneta_setup_rxqs(pp);
2707 ret = mvneta_setup_txqs(pp);
2709 goto err_cleanup_rxqs;
2711 /* Connect to port interrupt line */
2712 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2713 MVNETA_DRIVER_NAME, pp);
2715 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2716 goto err_cleanup_txqs;
2719 /* By default, the link is down */
2720 netif_carrier_off(pp->dev);
2722 ret = mvneta_mdio_probe(pp);
2724 netdev_err(dev, "cannot probe MDIO bus\n");
2728 mvneta_start_dev(pp);
2733 free_irq(pp->dev->irq, pp);
2735 mvneta_cleanup_txqs(pp);
2737 mvneta_cleanup_rxqs(pp);
2741 /* Stop the port, free port interrupt line */
2742 static int mvneta_stop(struct net_device *dev)
2744 struct mvneta_port *pp = netdev_priv(dev);
2746 mvneta_stop_dev(pp);
2747 mvneta_mdio_remove(pp);
2748 free_irq(dev->irq, pp);
2749 mvneta_cleanup_rxqs(pp);
2750 mvneta_cleanup_txqs(pp);
2755 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2757 struct mvneta_port *pp = netdev_priv(dev);
2762 return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2765 /* Ethtool methods */
2767 /* Get settings (phy address, speed) for ethtool */
2768 int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2770 struct mvneta_port *pp = netdev_priv(dev);
2775 return phy_ethtool_gset(pp->phy_dev, cmd);
2778 /* Set settings (phy address, speed) for ethtool */
2779 int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2781 struct mvneta_port *pp = netdev_priv(dev);
2786 return phy_ethtool_sset(pp->phy_dev, cmd);
2789 /* Set interrupt coalescing for ethtool */
2790 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2791 struct ethtool_coalesce *c)
2793 struct mvneta_port *pp = netdev_priv(dev);
2796 for (queue = 0; queue < rxq_number; queue++) {
2797 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2798 rxq->time_coal = c->rx_coalesce_usecs;
2799 rxq->pkts_coal = c->rx_max_coalesced_frames;
2800 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2801 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2804 for (queue = 0; queue < txq_number; queue++) {
2805 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2806 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2807 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2813 /* Get interrupt coalescing settings for ethtool */
2814 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2815 struct ethtool_coalesce *c)
2817 struct mvneta_port *pp = netdev_priv(dev);
2819 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2820 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2822 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2827 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2828 struct ethtool_drvinfo *drvinfo)
2830 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2831 sizeof(drvinfo->driver));
2832 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2833 sizeof(drvinfo->version));
2834 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2835 sizeof(drvinfo->bus_info));
2839 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2840 struct ethtool_ringparam *ring)
2842 struct mvneta_port *pp = netdev_priv(netdev);
2844 ring->rx_max_pending = MVNETA_MAX_RXD;
2845 ring->tx_max_pending = MVNETA_MAX_TXD;
2846 ring->rx_pending = pp->rx_ring_size;
2847 ring->tx_pending = pp->tx_ring_size;
2850 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2851 struct ethtool_ringparam *ring)
2853 struct mvneta_port *pp = netdev_priv(dev);
2855 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2857 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2858 ring->rx_pending : MVNETA_MAX_RXD;
2860 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
2861 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
2862 if (pp->tx_ring_size != ring->tx_pending)
2863 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2864 pp->tx_ring_size, ring->tx_pending);
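/* The lower clamp above guarantees room for at least two maximally
 * fragmented skbs, so the stop/wake thresholds computed in
 * mvneta_txq_init() remain sane.
 */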
2866 if (netif_running(dev)) {
2868 if (mvneta_open(dev)) {
2870 "error on opening device after ring param change\n");
2878 static const struct net_device_ops mvneta_netdev_ops = {
2879 .ndo_open = mvneta_open,
2880 .ndo_stop = mvneta_stop,
2881 .ndo_start_xmit = mvneta_tx,
2882 .ndo_set_rx_mode = mvneta_set_rx_mode,
2883 .ndo_set_mac_address = mvneta_set_mac_addr,
2884 .ndo_change_mtu = mvneta_change_mtu,
2885 .ndo_fix_features = mvneta_fix_features,
2886 .ndo_get_stats64 = mvneta_get_stats64,
2887 .ndo_do_ioctl = mvneta_ioctl,
2890 const struct ethtool_ops mvneta_eth_tool_ops = {
2891 .get_link = ethtool_op_get_link,
2892 .get_settings = mvneta_ethtool_get_settings,
2893 .set_settings = mvneta_ethtool_set_settings,
2894 .set_coalesce = mvneta_ethtool_set_coalesce,
2895 .get_coalesce = mvneta_ethtool_get_coalesce,
2896 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2897 .get_ringparam = mvneta_ethtool_get_ringparam,
2898 .set_ringparam = mvneta_ethtool_set_ringparam,
2902 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
2907 mvneta_port_disable(pp);
2909 /* Set port default values */
2910 mvneta_defaults_set(pp);
2912 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2917 /* Initialize TX descriptor rings */
2918 for (queue = 0; queue < txq_number; queue++) {
2919 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2921 txq->size = pp->tx_ring_size;
2922 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2925 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2930 /* Create Rx descriptor rings */
2931 for (queue = 0; queue < rxq_number; queue++) {
2932 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2934 rxq->size = pp->rx_ring_size;
2935 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2936 rxq->time_coal = MVNETA_RX_COAL_USEC;
2942 /* Platform glue: initialize the MBus decoding windows */
2943 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2944 const struct mbus_dram_target_info *dram)
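/* The NETA unit has six address decoding windows: clear all of them
 * first, then open one window per DRAM chip select described by the
 * MBus DRAM info, and finally commit the window enable mask.
 */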
2950 for (i = 0; i < 6; i++) {
2951 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2952 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2955 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2961 for (i = 0; i < dram->num_cs; i++) {
2962 const struct mbus_dram_window *cs = dram->cs + i;
2963 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2964 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2966 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2967 (cs->size - 1) & 0xffff0000);
2969 win_enable &= ~(1 << i);
2970 win_protect |= 3 << (2 * i);
2973 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2976 /* Power up the port */
2977 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2981 /* MAC Cause register should be cleared */
2982 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2984 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2986 /* Even though it might look weird, when we're configured in
2987 * SGMII or QSGMII mode, the RGMII bit needs to be set.
2990 case PHY_INTERFACE_MODE_QSGMII:
2991 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
2992 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2994 case PHY_INTERFACE_MODE_SGMII:
2995 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2996 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2998 case PHY_INTERFACE_MODE_RGMII:
2999 case PHY_INTERFACE_MODE_RGMII_ID:
3000 ctrl |= MVNETA_GMAC2_PORT_RGMII;
3006 if (pp->use_inband_status)
3007 ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3009 /* Cancel Port Reset */
3010 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
3011 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
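/* Poll until the de-asserted reset bit is reflected back by the
 * hardware, i.e. the port has actually left reset.
 */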
3013 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3014 MVNETA_GMAC2_PORT_RESET) != 0)
3020 /* Device initialization routine */
3021 static int mvneta_probe(struct platform_device *pdev)
3023 const struct mbus_dram_target_info *dram_target_info;
3024 struct resource *res;
3025 struct device_node *dn = pdev->dev.of_node;
3026 struct device_node *phy_node;
3027 struct mvneta_port *pp;
3028 struct net_device *dev;
3029 const char *dt_mac_addr;
3030 char hw_mac_addr[ETH_ALEN];
3031 const char *mac_from;
3032 const char *managed;
3036 /* Our multiqueue support is not complete, so for now, only
3037 * allow the usage of the first RX queue
3040 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
3044 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
3048 dev->irq = irq_of_parse_and_map(dn, 0);
3049 if (dev->irq == 0) {
3051 goto err_free_netdev;
3054 phy_node = of_parse_phandle(dn, "phy", 0);
3056 if (!of_phy_is_fixed_link(dn)) {
3057 dev_err(&pdev->dev, "no PHY specified\n");
3062 err = of_phy_register_fixed_link(dn);
3064 dev_err(&pdev->dev, "cannot register fixed PHY\n");
3068 /* In the case of a fixed PHY, the DT node associated
3069 * with the PHY is the Ethernet MAC DT node itself.
3071 phy_node = of_node_get(dn);
3074 phy_mode = of_get_phy_mode(dn);
3076 dev_err(&pdev->dev, "incorrect phy-mode\n");
3078 goto err_put_phy_node;
3081 dev->tx_queue_len = MVNETA_MAX_TXD;
3082 dev->watchdog_timeo = 5 * HZ;
3083 dev->netdev_ops = &mvneta_netdev_ops;
3085 dev->ethtool_ops = &mvneta_eth_tool_ops;
3087 pp = netdev_priv(dev);
3088 pp->phy_node = phy_node;
3089 pp->phy_interface = phy_mode;
3091 err = of_property_read_string(dn, "managed", &managed);
3092 pp->use_inband_status = (err == 0 &&
3093 strcmp(managed, "in-band-status") == 0);
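/* With the DT property 'managed = "in-band-status"', link state is
 * taken from the GMAC's in-band signalling (see
 * mvneta_fixed_link_update()) instead of a conventional polled PHY.
 */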
3095 pp->clk = devm_clk_get(&pdev->dev, NULL);
3096 if (IS_ERR(pp->clk)) {
3097 err = PTR_ERR(pp->clk);
3098 goto err_put_phy_node;
3101 clk_prepare_enable(pp->clk);
3103 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3104 pp->base = devm_ioremap_resource(&pdev->dev, res);
3105 if (IS_ERR(pp->base)) {
3106 err = PTR_ERR(pp->base);
3110 /* Alloc per-cpu stats */
3111 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
3117 dt_mac_addr = of_get_mac_address(dn);
3119 mac_from = "device tree";
3120 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3122 mvneta_get_mac_addr(pp, hw_mac_addr);
3123 if (is_valid_ether_addr(hw_mac_addr)) {
3124 mac_from = "hardware";
3125 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3127 mac_from = "random";
3128 eth_hw_addr_random(dev);
3132 if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
3133 pp->tx_csum_limit = 1600;
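/* The Armada 370 variant cannot checksum frames larger than 1600
 * bytes in hardware; mvneta_fix_features() drops IP_CSUM/TSO once the
 * MTU exceeds this limit.
 */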
3135 pp->tx_ring_size = MVNETA_MAX_TXD;
3136 pp->rx_ring_size = MVNETA_MAX_RXD;
3139 SET_NETDEV_DEV(dev, &pdev->dev);
3141 err = mvneta_init(&pdev->dev, pp);
3143 goto err_free_stats;
3145 err = mvneta_port_power_up(pp, phy_mode);
3147 dev_err(&pdev->dev, "can't power up port\n");
3148 goto err_free_stats;
3151 dram_target_info = mv_mbus_dram_info();
3152 if (dram_target_info)
3153 mvneta_conf_mbus_windows(pp, dram_target_info);
3155 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
3157 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3158 dev->hw_features |= dev->features;
3159 dev->vlan_features |= dev->features;
3160 dev->priv_flags |= IFF_UNICAST_FLT;
3161 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
3163 err = register_netdev(dev);
3165 dev_err(&pdev->dev, "failed to register\n");
3166 goto err_free_stats;
3169 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3172 platform_set_drvdata(pdev, pp->dev);
3174 if (pp->use_inband_status) {
3175 struct phy_device *phy = of_phy_find_device(dn);
3177 mvneta_fixed_link_update(pp, phy);
3179 put_device(&phy->dev);
3185 free_percpu(pp->stats);
3187 clk_disable_unprepare(pp->clk);
3189 of_node_put(phy_node);
3191 irq_dispose_mapping(dev->irq);
3197 /* Device removal routine */
3198 static int mvneta_remove(struct platform_device *pdev)
3200 struct net_device *dev = platform_get_drvdata(pdev);
3201 struct mvneta_port *pp = netdev_priv(dev);
3203 unregister_netdev(dev);
3204 clk_disable_unprepare(pp->clk);
3205 free_percpu(pp->stats);
3206 irq_dispose_mapping(dev->irq);
3207 of_node_put(pp->phy_node);
3213 static const struct of_device_id mvneta_match[] = {
3214 { .compatible = "marvell,armada-370-neta" },
3215 { .compatible = "marvell,armada-xp-neta" },
3218 MODULE_DEVICE_TABLE(of, mvneta_match);
3220 static struct platform_driver mvneta_driver = {
3221 .probe = mvneta_probe,
3222 .remove = mvneta_remove,
3224 .name = MVNETA_DRIVER_NAME,
3225 .of_match_table = mvneta_match,
3229 module_platform_driver(mvneta_driver);
3231 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3232 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3233 MODULE_LICENSE("GPL");
3235 module_param(rxq_number, int, S_IRUGO);
3236 module_param(txq_number, int, S_IRUGO);
3238 module_param(rxq_def, int, S_IRUGO);
3239 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);