/*
 * Copyright 2010-2011 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* XGMAC Register definitions */
#define XGMAC_CONTROL		0x00000000	/* MAC Configuration */
#define XGMAC_FRAME_FILTER	0x00000004	/* MAC Frame Filter */
#define XGMAC_FLOW_CTRL		0x00000018	/* MAC Flow Control */
#define XGMAC_VLAN_TAG		0x0000001C	/* VLAN Tags */
#define XGMAC_VERSION		0x00000020	/* Version */
#define XGMAC_VLAN_INCL		0x00000024	/* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL		0x00000028	/* LPI Control and Status */
#define XGMAC_LPI_TIMER		0x0000002C	/* LPI Timers Control */
#define XGMAC_TX_PACE		0x00000030	/* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH		0x00000034	/* VLAN Hash Table */
#define XGMAC_DEBUG		0x00000038	/* Debug */
#define XGMAC_INT_STAT		0x0000003C	/* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg)	(0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg)	(0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n)		(0x00000300 + (n) * 4)	/* HASH table regs */
#define XGMAC_NUM_HASH		16
#define XGMAC_OMR		0x00000400
#define XGMAC_REMOTE_WAKE	0x00000700	/* Remote Wake-Up Frm Filter */
#define XGMAC_PMT		0x00000704	/* PMT Control and Status */
#define XGMAC_MMC_CTRL		0x00000800	/* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX	0x00000804	/* Receive Interrupt */
#define XGMAC_MMC_INTR_TX	0x00000808	/* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX	0x0000080c	/* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX	0x00000810	/* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO	0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI	0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO	0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI	0x00000820
#define XGMAC_MMC_TXBCFRAME_G	0x00000824
#define XGMAC_MMC_TXMCFRAME_G	0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB	0x00000864
#define XGMAC_MMC_TXMCFRAME_GB	0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB	0x00000874
#define XGMAC_MMC_TXUNDERFLOW	0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO	0x00000884
#define XGMAC_MMC_TXOCTET_G_HI	0x00000888
#define XGMAC_MMC_TXFRAME_G_LO	0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI	0x00000890
#define XGMAC_MMC_TXPAUSEFRAME	0x00000894
#define XGMAC_MMC_TXVLANFRAME	0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO	0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI	0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO	0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI	0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO	0x00000910
#define XGMAC_MMC_RXOCTET_G_HI	0x00000914
#define XGMAC_MMC_RXBCFRAME_G	0x00000918
#define XGMAC_MMC_RXMCFRAME_G	0x00000920
#define XGMAC_MMC_RXCRCERR	0x00000928
#define XGMAC_MMC_RXRUNT	0x00000930
#define XGMAC_MMC_RXJABBER	0x00000934
#define XGMAC_MMC_RXUCFRAME_G	0x00000970
#define XGMAC_MMC_RXLENGTHERR	0x00000978
#define XGMAC_MMC_RXPAUSEFRAME	0x00000988
#define XGMAC_MMC_RXOVERFLOW	0x00000990
#define XGMAC_MMC_RXVLANFRAME	0x00000998
#define XGMAC_MMC_RXWATCHDOG	0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE	0x00000f00	/* Bus Mode */
#define XGMAC_DMA_TX_POLL	0x00000f04	/* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL	0x00000f08	/* Receive Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR	0x00000f0c	/* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR	0x00000f10	/* Transmit List Base */
#define XGMAC_DMA_STATUS	0x00000f14	/* Status Register */
#define XGMAC_DMA_CONTROL	0x00000f18	/* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA	0x00000f1c	/* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20	/* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER	0x00000f24	/* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS	0x00000f28	/* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS	0x00000f2C	/* AXI Status */
#define XGMAC_DMA_HW_FEATURE	0x00000f58	/* Enabled Hardware Features */

#define XGMAC_ADDR_AE		0x80000000
#define XGMAC_MAX_FILTER_ADDR	31

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET	0x80000000
#define XGMAC_PMT_GLBL_UNICAST	0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM	0x00000040
#define XGMAC_PMT_MAGIC_PKT	0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN	0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN	0x00000002
#define XGMAC_PMT_POWERDOWN	0x00000001

#define XGMAC_CONTROL_SPD	0x40000000	/* Speed control */
#define XGMAC_CONTROL_SPD_MASK	0x60000000
#define XGMAC_CONTROL_SPD_1G	0x60000000
#define XGMAC_CONTROL_SPD_2_5G	0x40000000
#define XGMAC_CONTROL_SPD_10G	0x00000000
#define XGMAC_CONTROL_SARC	0x10000000	/* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK	0x18000000
#define XGMAC_CONTROL_CAR	0x04000000	/* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK	0x06000000
#define XGMAC_CONTROL_DP	0x01000000	/* Disable Padding */
#define XGMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD	0x00400000	/* Jabber disable */
#define XGMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
#define XGMAC_CONTROL_LM	0x00001000	/* Loop-back mode */
#define XGMAC_CONTROL_IPC	0x00000400	/* Checksum Offload */
#define XGMAC_CONTROL_ACS	0x00000080	/* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC	0x00000010	/* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE	0x00000008	/* Transmitter Enable */
#define XGMAC_CONTROL_RE	0x00000004	/* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF	0x00000100	/* Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF	0x00000800	/* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF	0x00001000	/* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT	16
#define XGMAC_FLOW_CTRL_DZQP	0x00000080	/* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT	0x00000020	/* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030	/* PLT MASK */
#define XGMAC_FLOW_CTRL_UP	0x00000008	/* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT	2		/* (in DWORDS) */
#define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT	8
#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT	17
#define DMA_BUS_MODE_USP	0x00800000
#define DMA_BUS_MODE_8PBL	0x01000000
#define DMA_BUS_MODE_AAL	0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT	14
#define DMA_BUS_FB		0x00010000	/* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
#define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
#define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
#define DMA_CONTROL_OSF		0x00000004	/* Operate on 2nd tx frame */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
#define DMA_INTR_ENA_AIE	0x00008000	/* Abnormal Summary */
#define DMA_INTR_ENA_ERE	0x00004000	/* Early Receive */
#define DMA_INTR_ENA_FBE	0x00002000	/* Fatal Bus Error */
#define DMA_INTR_ENA_ETE	0x00000400	/* Early Transmit */
#define DMA_INTR_ENA_RWE	0x00000200	/* Receive Watchdog */
#define DMA_INTR_ENA_RSE	0x00000100	/* Receive Stopped */
#define DMA_INTR_ENA_RUE	0x00000080	/* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE	0x00000040	/* Receive Interrupt */
#define DMA_INTR_ENA_UNE	0x00000020	/* Tx Underflow */
#define DMA_INTR_ENA_OVE	0x00000010	/* Receive Overflow */
#define DMA_INTR_ENA_TJE	0x00000008	/* Transmit Jabber */
#define DMA_INTR_ENA_TUE	0x00000004	/* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE	0x00000002	/* Transmit Stopped */
#define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */

#define DMA_INTR_NORMAL		(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
				 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
				 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
				 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
				 DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
#define DMA_STATUS_TS_SHIFT	20
#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
#define DMA_STATUS_RS_SHIFT	17
#define DMA_STATUS_NIS		0x00010000	/* Normal Interrupt Summary */
#define DMA_STATUS_AIS		0x00008000	/* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI		0x00004000	/* Early Receive Interrupt */
#define DMA_STATUS_FBI		0x00002000	/* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI		0x00000400	/* Early Transmit Interrupt */
#define DMA_STATUS_RWT		0x00000200	/* Receive Watchdog Timeout */
#define DMA_STATUS_RPS		0x00000100	/* Receive Process Stopped */
#define DMA_STATUS_RU		0x00000080	/* Receive Buffer Unavailable */
#define DMA_STATUS_RI		0x00000040	/* Receive Interrupt */
#define DMA_STATUS_UNF		0x00000020	/* Transmit Underflow */
#define DMA_STATUS_OVF		0x00000010	/* Receive Overflow */
#define DMA_STATUS_TJT		0x00000008	/* Transmit Jabber Timeout */
#define DMA_STATUS_TU		0x00000004	/* Transmit Buffer Unavail */
#define DMA_STATUS_TPS		0x00000002	/* Transmit Process Stopped */
#define DMA_STATUS_TI		0x00000001	/* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
#define MAC_ENABLE_RX		0x00000004	/* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF		0x00200000	/* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF		0x00100000	/* Flush Transmit FIFO */
#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK	0x00030000
#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
#define XGMAC_OMR_EFC		0x00000100	/* Enable Hardware FC */
#define XGMAC_OMR_FEF		0x00000080	/* Forward Error Frames */
#define XGMAC_OMR_DT		0x00000040	/* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF		0x00000020	/* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256	0x00000018	/* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL	0x00010000	/* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ	0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ		(0x2000 - 8)

#define RXDESC_EXT_STATUS	0x00000001
#define RXDESC_CRC_ERR		0x00000002
#define RXDESC_RX_ERR		0x00000008
#define RXDESC_RX_WDOG		0x00000010
#define RXDESC_FRAME_TYPE	0x00000020
#define RXDESC_GIANT_FRAME	0x00000080
#define RXDESC_LAST_SEG		0x00000100
#define RXDESC_FIRST_SEG	0x00000200
#define RXDESC_VLAN_FRAME	0x00000400
#define RXDESC_OVERFLOW_ERR	0x00000800
#define RXDESC_LENGTH_ERR	0x00001000
#define RXDESC_SA_FILTER_FAIL	0x00002000
#define RXDESC_DESCRIPTOR_ERR	0x00004000
#define RXDESC_ERROR_SUMMARY	0x00008000
#define RXDESC_FRAME_LEN_OFFSET	16
#define RXDESC_FRAME_LEN_MASK	0x3fff0000
#define RXDESC_DA_FILTER_FAIL	0x40000000

#define RXDESC1_END_RING	0x00008000

#define RXDESC_IP_PAYLOAD_MASK	0x00000003
#define RXDESC_IP_PAYLOAD_UDP	0x00000001
#define RXDESC_IP_PAYLOAD_TCP	0x00000002
#define RXDESC_IP_PAYLOAD_ICMP	0x00000003
#define RXDESC_IP_HEADER_ERR	0x00000008
#define RXDESC_IP_PAYLOAD_ERR	0x00000010
#define RXDESC_IPV4_PACKET	0x00000040
#define RXDESC_IPV6_PACKET	0x00000080
#define TXDESC_UNDERFLOW_ERR	0x00000001
#define TXDESC_JABBER_TIMEOUT	0x00000002
#define TXDESC_LOCAL_FAULT	0x00000004
#define TXDESC_REMOTE_FAULT	0x00000008
#define TXDESC_VLAN_FRAME	0x00000010
#define TXDESC_FRAME_FLUSHED	0x00000020
#define TXDESC_IP_HEADER_ERR	0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR	0x00000080
#define TXDESC_ERROR_SUMMARY	0x00008000
#define TXDESC_SA_CTRL_INSERT	0x00040000
#define TXDESC_SA_CTRL_REPLACE	0x00080000
#define TXDESC_2ND_ADDR_CHAINED	0x00100000
#define TXDESC_END_RING		0x00200000
#define TXDESC_CSUM_IP		0x00400000
#define TXDESC_CSUM_IP_PAYLD	0x00800000
#define TXDESC_CSUM_ALL		0x00C00000
#define TXDESC_CRC_EN_REPLACE	0x01000000
#define TXDESC_CRC_EN_APPEND	0x02000000
#define TXDESC_DISABLE_PAD	0x04000000
#define TXDESC_FIRST_SEG	0x10000000
#define TXDESC_LAST_SEG		0x20000000
#define TXDESC_INTERRUPT	0x40000000

#define DESC_OWN		0x80000000
#define DESC_BUFFER1_SZ_MASK	0x00001fff
#define DESC_BUFFER2_SZ_MASK	0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET	16

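/* Hardware DMA descriptor. The driver programs the controller for the
 * 32-byte "alternate" descriptor layout (DMA_BUS_MODE_ATDS in xgmac_hw_init),
 * so each descriptor carries two buffer pointers plus an extended status
 * word in addition to the classic flags/buf_size pair. */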
struct xgmac_dma_desc {
	__le32 flags;
	__le32 buf_size;
	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
	__le32 ext_status;
	__le32 res[3];			/* pad to the 32-byte alternate size */
};

struct xgmac_extra_stats {
	/* Transmit errors */
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	unsigned long tx_local_fault;
	unsigned long tx_remote_fault;
	/* Receive errors */
	unsigned long rx_watchdog;
	unsigned long rx_da_filter_fail;
	unsigned long rx_sa_filter_fail;
	unsigned long rx_payload_error;
	unsigned long rx_ip_header_error;
	/* Tx/Rx IRQ errors */
	unsigned long tx_underflow;
	unsigned long tx_process_stopped;
	unsigned long rx_buf_unav;
	unsigned long rx_process_stopped;
	unsigned long tx_early;
	unsigned long fatal_bus_error;
};

struct xgmac_priv {
	struct xgmac_dma_desc *dma_rx;
	struct sk_buff **rx_skbuff;
	unsigned int rx_tail;
	unsigned int rx_head;

	struct xgmac_dma_desc *dma_tx;
	struct sk_buff **tx_skbuff;
	unsigned int tx_head;
	unsigned int tx_tail;
	int tx_irq_cnt;

	void __iomem *base;
	unsigned int dma_buf_sz;
	dma_addr_t dma_rx_phy;
	dma_addr_t dma_tx_phy;

	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;

	struct xgmac_extra_stats xstats;

	spinlock_t stats_lock;
	int pmt_irq;
	char rx_pause;
	char tx_pause;
	int wolopts;
};

/* XGMAC Configuration Settings */
#define MAX_MTU			9000
#define PAUSE_TIME		0x400

#define DMA_RX_RING_SZ		256
#define DMA_TX_RING_SZ		128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH		(DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
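/* These helpers borrow the <linux/circ_buf.h> conventions and therefore
 * assume power-of-two ring sizes (DMA_RX_RING_SZ = 256, DMA_TX_RING_SZ = 128)
 * so that "& (size - 1)" is a cheap modulo. One slot is always left unused:
 * e.g. with head == tail on the 128-entry TX ring, dma_ring_cnt() is 0
 * (empty) and dma_ring_space() is 127, never 128, so a full ring can never
 * be confused with an empty one. */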
#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)

/* XGMAC Descriptor Access Helpers */
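/* Each descriptor addresses two buffers of at most MAX_DESC_BUF_SZ
 * (8 KiB - 8) bytes each; buffer 2 is only used when a frame exceeds what
 * buffer 1 can hold, which lets one descriptor cover a jumbo frame. The
 * sizes live in the buf_size word: bits 12:0 (DESC_BUFFER1_SZ_MASK) for
 * buffer 1 and bits 28:16 (DESC_BUFFER2_SZ_MASK) for buffer 2. */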
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
	if (buf_sz > MAX_DESC_BUF_SZ)
		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
	else
		p->buf_size = cpu_to_le32(buf_sz);
}

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
	u32 len = le32_to_cpu(p->buf_size);
	return (len & DESC_BUFFER1_SZ_MASK) +
		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
				     int buf_sz)
{
	struct xgmac_dma_desc *end = p + ring_size - 1;

	memset(p, 0, sizeof(*p) * ring_size);

	for (; p <= end; p++)
		desc_set_buf_len(p, buf_sz);

	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
	memset(p, 0, sizeof(*p) * ring_size);
	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

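/* Descriptor ownership handshake: software fills in a descriptor and hands
 * it to the hardware by setting DESC_OWN; the DMA clears the bit when it is
 * done with it. The TX helper preserves only the END_RING marker across
 * handoffs, and xgmac_xmit() sets DESC_OWN on the first descriptor last
 * (behind a wmb()) so the device never sees a half-built chain. */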
static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	/* Clear all fields and set the owner */
	p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	tmpflags |= flags | DESC_OWN;
	p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     u32 paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      u32 paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= ETH_FCS_LEN;

	return len;
}

static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	int timeout = 1000;
	u32 reg = readl(ioaddr + XGMAC_OMR);
	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
		udelay(1);
}

static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	u32 status = le32_to_cpu(p->flags);

	if (!(status & TXDESC_ERROR_SUMMARY))
		return 0;

	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
	if (status & TXDESC_JABBER_TIMEOUT)
		x->tx_jabber++;
	if (status & TXDESC_FRAME_FLUSHED)
		x->tx_frame_flushed++;
	if (status & TXDESC_UNDERFLOW_ERR)
		xgmac_dma_flush_tx_fifo(priv->base);
	if (status & TXDESC_IP_HEADER_ERR)
		x->tx_ip_header_error++;
	if (status & TXDESC_LOCAL_FAULT)
		x->tx_local_fault++;
	if (status & TXDESC_REMOTE_FAULT)
		x->tx_remote_fault++;
	if (status & TXDESC_PAYLOAD_CSUM_ERR)
		x->tx_payload_error++;

	return -1;
}

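/* Returns -1 when the frame must be dropped, otherwise a CHECKSUM_* value
 * destined for skb->ip_summed: CHECKSUM_UNNECESSARY when the hardware
 * verified the IP/payload checksums, CHECKSUM_NONE when it could not
 * (non-IP payload or a detected checksum error). */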
static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	int ret = CHECKSUM_UNNECESSARY;
	u32 status = le32_to_cpu(p->flags);
	u32 ext_status = le32_to_cpu(p->ext_status);

	if (status & RXDESC_DA_FILTER_FAIL) {
		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
		x->rx_da_filter_fail++;
		return -1;
	}

	/* All frames should fit into a single buffer */
	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
		return -1;

	/* Check if packet has checksum already */
	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
	    !(ext_status & RXDESC_IP_PAYLOAD_MASK))
		ret = CHECKSUM_NONE;

	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

	if (!(status & RXDESC_ERROR_SUMMARY))
		return ret;

	/* Handle any errors */
	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
		      RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
		return -1;

	if (status & RXDESC_EXT_STATUS) {
		if (ext_status & RXDESC_IP_HEADER_ERR)
			x->rx_ip_header_error++;
		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
			x->rx_payload_error++;
		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
			   ext_status);
		return CHECKSUM_NONE;
	}

	return ret;
}

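/* xgmac_mac_enable() turns on the MAC receiver/transmitter before starting
 * the DMA engines; xgmac_mac_disable() reverses the order, quiescing DMA
 * first so the descriptor rings are no longer touched by the time they are
 * torn down. */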
static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_CONTROL);
	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	writel(value, ioaddr + XGMAC_CONTROL);

	value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	value = readl(ioaddr + XGMAC_CONTROL);
	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
	writel(value, ioaddr + XGMAC_CONTROL);
}

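/* Perfect-filter address registers: the high word holds the last two octets
 * of the MAC address plus the Address Enable flag (XGMAC_ADDR_AE) for
 * secondary entries; the low word holds the first four octets. Entry 0 is
 * the station address, entries 1..XGMAC_MAX_FILTER_ADDR are handed out by
 * xgmac_set_rx_mode() for additional unicast/multicast addresses. */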
static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 data;

	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
	u32 reg;
	unsigned int flow = 0;

	priv->rx_pause = rx;
	priv->tx_pause = tx;

	if (rx || tx) {
		if (rx)
			flow |= XGMAC_FLOW_CTRL_RFE;
		if (tx)
			flow |= XGMAC_FLOW_CTRL_TFE;

		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

		writel(flow, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg |= XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	} else {
		writel(0, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg &= ~XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	}

	return 0;
}

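/* Re-arm empty RX descriptors: freshly allocated skbs are DMA-mapped and the
 * descriptors handed back to the hardware via desc_set_rx_owner(). The
 * "> 1" space test below preserves the circ_buf convention of keeping one
 * slot unused, so head == tail always means an empty ring. */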
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
	struct xgmac_dma_desc *p;
	dma_addr_t paddr;
	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
		int entry = priv->rx_head;
		struct sk_buff *skb;

		p = priv->dma_rx + entry;

		if (priv->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			paddr = dma_map_single(priv->device, skb->data,
					       bufsz, DMA_FROM_DEVICE);
			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
		}

		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
			priv->rx_head, priv->rx_tail);

		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
		desc_set_rx_owner(p);
	}
}

/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize;

	/* Set the Buffer size according to the MTU;
	 * The total buffer size including any IP offset must be a multiple
	 * of 8 bytes.
	 */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->rx_skbuff)
		return -ENOMEM;

	priv->dma_rx = dma_alloc_coherent(priv->device,
					  DMA_RX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_rx_phy, GFP_KERNEL);
	if (!priv->dma_rx)
		goto err_dma_rx;

	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skb;

	priv->dma_tx = dma_alloc_coherent(priv->device,
					  DMA_TX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_tx_phy, GFP_KERNEL);
	if (!priv->dma_tx)
		goto err_dma_tx;

	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
	    priv->dma_rx, priv->dma_tx,
	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	priv->rx_tail = 0;
	priv->rx_head = 0;
	priv->dma_buf_sz = bfsize;
	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
	xgmac_rx_refill(priv);

	priv->tx_tail = 0;
	priv->tx_head = 0;
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

	return 0;

err_dma_tx:
	kfree(priv->tx_skbuff);
err_tx_skb:
	dma_free_coherent(priv->device,
			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
	kfree(priv->rx_skbuff);
	return -ENOMEM;
}

static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->rx_skbuff)
		return;

	for (i = 0; i < DMA_RX_RING_SZ; i++) {
		if (priv->rx_skbuff[i] == NULL)
			continue;

		p = priv->dma_rx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
		priv->rx_skbuff[i] = NULL;
	}
}

static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
	int i, f;
	struct xgmac_dma_desc *p;

	if (!priv->tx_skbuff)
		return;

	for (i = 0; i < DMA_TX_RING_SZ; i++) {
		int entry = i;

		if (priv->tx_skbuff[i] == NULL)
			continue;

		p = priv->dma_tx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 desc_get_buf_len(p), DMA_TO_DEVICE);

		/* fragment descriptors directly follow the head descriptor */
		for (f = 0; f < skb_shinfo(priv->tx_skbuff[entry])->nr_frags; f++) {
			p = priv->dma_tx + ++i;
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(priv->tx_skbuff[entry]);
		priv->tx_skbuff[entry] = NULL;
	}
}

static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	xgmac_free_rx_skbufs(priv);
	xgmac_free_tx_skbufs(priv);

	/* Free the consistent memory allocated for descriptor rings */
	if (priv->dma_tx) {
		dma_free_coherent(priv->device,
				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		priv->dma_tx = NULL;
	}
	if (priv->dma_rx) {
		dma_free_coherent(priv->device,
				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
		priv->dma_rx = NULL;
	}
	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;
	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;
}

/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
	int i;

	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (desc_get_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		if (desc_get_tx_ls(p))
			desc_get_tx_status(priv, p);

		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
			priv->tx_head, priv->tx_tail);

		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 desc_get_buf_len(p), DMA_TO_DEVICE);

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);

		if (!skb)
			continue;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			/* frag descriptors directly follow the head desc */
			p = priv->dma_tx + priv->tx_tail;

			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

			priv->tx_tail = dma_ring_incr(priv->tx_tail,
						      DMA_TX_RING_SZ);
		}

		dev_kfree_skb(skb);
	}

	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
	    TX_THRESH)
		netif_wake_queue(priv->dev);
}

/**
 * xgmac_tx_err:
 * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void xgmac_tx_err(struct xgmac_priv *priv)
{
	u32 reg, value, inten;

	netif_stop_queue(priv->dev);

	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	reg = readl(priv->base + XGMAC_DMA_CONTROL);
	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
	do {
		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
	} while (value && (value != 0x600000));

	xgmac_free_tx_skbufs(priv);
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
	priv->tx_tail = 0;
	priv->tx_head = 0;
	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
		priv->base + XGMAC_DMA_STATUS);
	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);

	netif_wake_queue(priv->dev);
}

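/* One-time controller bring-up: soft-reset the DMA block, program the bus
 * mode (16-beat bursts, fixed burst, 32-byte alternate descriptors), unmask
 * the default interrupts, apply the AXI bus magic value, and set MAC and
 * operation-mode defaults (jumbo frames, automatic pad/FCS strip, TX
 * store-and-forward and the flow-control thresholds). */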
static int xgmac_hw_init(struct net_device *dev)
{
	u32 value, ctrl;
	int limit;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Save the ctrl register value */
	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

	/* SW reset */
	value = DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
	limit = 15000;
	while (limit-- &&
		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
		cpu_relax();
	if (limit < 0)
		return -EBUSY;

	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_CAR;
	if (dev->features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);

	/* Set the HW DMA mode and the COE */
	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
		XGMAC_OMR_RTC_256, ioaddr + XGMAC_OMR);

	/* Reset the MMC counters */
	writel(1, ioaddr + XGMAC_MMC_CTRL);
	return 0;
}

/**
 *  xgmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
	int ret;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Check that the MAC address is valid. If it's not, refuse
	 * to bring the device up. The user must specify an
	 * address using the following linux command:
	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
	xgmac_hw_init(dev);
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;
}

/**
 *  xgmac_stop - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);

	/* Release and free the Rx/Tx resources */
	xgmac_free_dma_desc_rings(priv);

	return 0;
}

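/* TX completion interrupts are mitigated: xgmac_xmit() requests
 * TXDESC_INTERRUPT on only every (DMA_TX_RING_SZ / 4)th frame, i.e. every
 * 32nd frame, via tx_irq_cnt; reclaimed descriptors are processed from
 * xgmac_poll()/xgmac_tx_complete() rather than per-packet. */
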
/**
 *  xgmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int entry;
	int i;
	u32 irq_flag;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct xgmac_dma_desc *desc, *first;
	unsigned int desc_flags;
	unsigned int len;
	dma_addr_t paddr;

	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
		TXDESC_CSUM_ALL : 0;
	entry = priv->tx_head;
	desc = priv->dma_tx + entry;
	first = desc;

	len = skb_headlen(skb);
	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	priv->tx_skbuff[entry] = skb;
	desc_set_buf_addr_and_size(desc, paddr, len);

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, paddr)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = NULL;

		desc_set_buf_addr_and_size(desc, paddr, len);
		if (i < (nfrags - 1))
			desc_set_tx_owner(desc, desc_flags);
	}

	/* Interrupt on completion only for the latest segment */
	if (desc != first)
		desc_set_tx_owner(desc, desc_flags |
			TXDESC_LAST_SEG | irq_flag);
	else
		desc_flags |= TXDESC_LAST_SEG | irq_flag;

	/* Set owner on first desc last to avoid race condition */
	wmb();
	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

	writel(1, priv->base + XGMAC_DMA_TX_POLL);
	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
	    MAX_SKB_FRAGS)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
	unsigned int entry;
	unsigned int count = 0;
	struct xgmac_dma_desc *p;

	while (count < limit) {
		int ip_checksum;
		struct sk_buff *skb;
		int frame_len;

		entry = priv->rx_tail;
		p = priv->dma_rx + entry;
		if (desc_get_owner(p))
			break;

		count++;
		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

		/* read the status of the incoming frame */
		ip_checksum = desc_get_rx_status(priv, p);
		if (ip_checksum < 0)
			continue;

		skb = priv->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
			break;
		}
		priv->rx_skbuff[entry] = NULL;

		frame_len = desc_get_rx_frame_len(p);
		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
			frame_len, ip_checksum);

		skb_put(skb, frame_len);
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 frame_len, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb->ip_summed = ip_checksum;
		if (ip_checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);
	}

	xgmac_rx_refill(priv);

	return count;
}

/**
 *  xgmac_poll - xgmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  This function implements the reception process.
 *  Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
	struct xgmac_priv *priv = container_of(napi,
				       struct xgmac_priv, napi);
	int work_done = 0;

	xgmac_tx_complete(priv);
	work_done = xgmac_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
	}
	return work_done;
}

/**
 *  xgmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	xgmac_tx_err(priv);
}

/**
 *  xgmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
	int i;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned int value = 0;
	u32 hash_filter[XGMAC_NUM_HASH];
	int reg = 1;
	struct netdev_hw_addr *ha;
	bool use_hash = false;

	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
		   netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
		return;
	}

	memset(hash_filter, 0, sizeof(hash_filter));

	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_uc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

	if (dev->flags & IFF_ALLMULTI) {
		value |= XGMAC_FRAME_FILTER_PM;
		goto out;
	}

	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_mc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

out:
	for (i = 0; i < XGMAC_NUM_HASH; i++)
		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

	writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

/**
 *  xgmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	int old_mtu;

	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
		return -EINVAL;
	}

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* Stop everything, get ready to change the MTU */
	if (!netif_running(dev))
		return 0;

	/* Bring the interface down and then back up */
	xgmac_stop(dev);
	return xgmac_open(dev);
}

static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
	if (intr_status & XGMAC_INT_STAT_PMT) {
		netdev_dbg(priv->dev, "received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT */
		readl(ioaddr + XGMAC_PMT);
	}
	return IRQ_HANDLED;
}

static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	bool tx_err = false;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	struct xgmac_extra_stats *x = &priv->xstats;

	/* read the status register (CSR5) */
	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);

	/* It displays the DMA process states (CSR5 register) */
	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		if (intr_status & DMA_STATUS_TJT) {
			netdev_err(priv->dev, "transmit jabber\n");
			x->tx_jabber++;
		}
		if (intr_status & DMA_STATUS_RU)
			x->rx_buf_unav++;
		if (intr_status & DMA_STATUS_RPS) {
			netdev_err(priv->dev, "receive process stopped\n");
			x->rx_process_stopped++;
		}
		if (intr_status & DMA_STATUS_ETI) {
			netdev_err(priv->dev, "transmit early interrupt\n");
			x->tx_early++;
		}
		if (intr_status & DMA_STATUS_TPS) {
			netdev_err(priv->dev, "transmit process stopped\n");
			x->tx_process_stopped++;
			tx_err = true;
		}
		if (intr_status & DMA_STATUS_FBI) {
			netdev_err(priv->dev, "fatal bus error\n");
			x->fatal_bus_error++;
			tx_err = true;
		}

		if (tx_err)
			xgmac_tx_err(priv);
	}

	/* TX/RX NORMAL interrupts */
	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void xgmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	xgmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static struct rtnl_link_stats64 *
xgmac_get_stats64(struct net_device *dev,
		  struct rtnl_link_stats64 *storage)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *base = priv->base;
	u32 count;

	spin_lock_bh(&priv->stats_lock);
	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);

	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;

	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);

	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;

	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
	storage->tx_packets = count;
	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);

	writel(0, base + XGMAC_MMC_CTRL);
	spin_unlock_bh(&priv->stats_lock);
	return storage;
}

static int xgmac_set_mac_address(struct net_device *dev, void *p)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);

	return 0;
}

static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
{
	u32 ctrl;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	u32 changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	ctrl = readl(ioaddr + XGMAC_CONTROL);
	if (features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	else
		ctrl &= ~XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	return 0;
}

static const struct net_device_ops xgmac_netdev_ops = {
	.ndo_open = xgmac_open,
	.ndo_start_xmit = xgmac_xmit,
	.ndo_stop = xgmac_stop,
	.ndo_change_mtu = xgmac_change_mtu,
	.ndo_set_rx_mode = xgmac_set_rx_mode,
	.ndo_tx_timeout = xgmac_tx_timeout,
	.ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgmac_poll_controller,
#endif
	.ndo_set_mac_address = xgmac_set_mac_address,
	.ndo_set_features = xgmac_set_features,
};

static int xgmac_ethtool_getsettings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	cmd->autoneg = 0;
	cmd->duplex = DUPLEX_FULL;
	ethtool_cmd_speed_set(cmd, 10000);
	cmd->supported = 0;
	cmd->advertising = 0;
	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static void xgmac_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	pause->rx_pause = priv->rx_pause;
	pause->tx_pause = priv->tx_pause;
}

static int xgmac_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	if (pause->autoneg)
		return -EINVAL;

	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
}

struct xgmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_offset;
	bool is_reg;
};

#define XGMAC_STAT(m)	\
	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
#define XGMAC_HW_STAT(m, reg_offset)	\
	{ #m, reg_offset, true }

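/* Two kinds of ethtool statistics: XGMAC_STAT entries live in struct
 * xgmac_priv (stat_offset is an offset into the structure), while
 * XGMAC_HW_STAT entries are read straight from the MMC counter registers
 * (stat_offset is a register offset and is_reg is true). */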
static const struct xgmac_stats xgmac_gstrings_stats[] = {
	XGMAC_STAT(tx_frame_flushed),
	XGMAC_STAT(tx_payload_error),
	XGMAC_STAT(tx_ip_header_error),
	XGMAC_STAT(tx_local_fault),
	XGMAC_STAT(tx_remote_fault),
	XGMAC_STAT(tx_early),
	XGMAC_STAT(tx_process_stopped),
	XGMAC_STAT(tx_jabber),
	XGMAC_STAT(rx_buf_unav),
	XGMAC_STAT(rx_process_stopped),
	XGMAC_STAT(rx_payload_error),
	XGMAC_STAT(rx_ip_header_error),
	XGMAC_STAT(rx_da_filter_fail),
	XGMAC_STAT(rx_sa_filter_fail),
	XGMAC_STAT(fatal_bus_error),
	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)

static void xgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *dummy,
				    u64 *data)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void *p = priv;
	int i;

	for (i = 0; i < XGMAC_STATS_LEN; i++) {
		if (xgmac_gstrings_stats[i].is_reg)
			*data++ = readl(priv->base +
				xgmac_gstrings_stats[i].stat_offset);
		else
			*data++ = *(u32 *)(p +
				xgmac_gstrings_stats[i].stat_offset);
	}
}

static int xgmac_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return XGMAC_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void xgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < XGMAC_STATS_LEN; i++) {
			memcpy(p, xgmac_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static void xgmac_get_wol(struct net_device *dev,
			  struct ethtool_wolinfo *wol)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	if (device_can_wakeup(priv->device)) {
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
		wol->wolopts = priv->wolopts;
	}
}

static int xgmac_set_wol(struct net_device *dev,
			 struct ethtool_wolinfo *wol)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	if (!device_can_wakeup(priv->device))
		return -ENOTSUPP;

	if (wol->wolopts & ~support)
		return -EINVAL;

	priv->wolopts = wol->wolopts;

	if (priv->wolopts) {
		device_set_wakeup_enable(priv->device, 1);
		enable_irq_wake(dev->irq);
	} else {
		device_set_wakeup_enable(priv->device, 0);
		disable_irq_wake(dev->irq);
	}

	return 0;
}

static const struct ethtool_ops xgmac_ethtool_ops = {
	.get_settings = xgmac_ethtool_getsettings,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = xgmac_get_pauseparam,
	.set_pauseparam = xgmac_set_pauseparam,
	.get_ethtool_stats = xgmac_get_ethtool_stats,
	.get_strings = xgmac_get_strings,
	.get_wol = xgmac_get_wol,
	.set_wol = xgmac_set_wol,
	.get_sset_count = xgmac_get_sset_count,
};

/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through platform_device.
 */
static int xgmac_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct xgmac_priv *priv = NULL;
	u32 uid;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
	if (!ndev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);
	ndev->netdev_ops = &xgmac_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
	spin_lock_init(&priv->stats_lock);

	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		netdev_err(ndev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	uid = readl(priv->base + XGMAC_VERSION);
	netdev_info(ndev, "h/w version is 0x%x\n", uid);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq == -ENXIO) {
		netdev_err(ndev, "No irq resource\n");
		ret = ndev->irq;
		goto err_irq;
	}

	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   ndev->irq, ret);
		goto err_irq;
	}

	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq == -ENXIO) {
		netdev_err(ndev, "No pmt irq resource\n");
		ret = priv->pmt_irq;
		goto err_pmt_irq;
	}

	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   priv->pmt_irq, ret);
		goto err_pmt_irq;
	}

	device_set_wakeup_capable(&pdev->dev, 1);
	if (device_can_wakeup(priv->device))
		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */

	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features;
	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* Get the MAC address */
	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
	if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid",
			 ndev->dev_addr);

	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
	ret = register_netdev(ndev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	netif_napi_del(&priv->napi);
	free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
	free_irq(ndev->irq, ndev);
err_irq:
	iounmap(priv->base);
err_io:
	free_netdev(ndev);
err_alloc:
	release_mem_region(res->start, resource_size(res));
	platform_set_drvdata(pdev, NULL);
	return ret;
}

/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, releases the DMA descriptor rings, unregisters the network device
 * and unmaps the allocated memory.
 */
static int xgmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xgmac_priv *priv = netdev_priv(ndev);
	struct resource *res;

	xgmac_mac_disable(priv->base);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);
	free_irq(priv->pmt_irq, ndev);

	platform_set_drvdata(pdev, NULL);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
	if (mode & WAKE_UCAST)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

	writel(pmt, ioaddr + XGMAC_PMT);
}

static int xgmac_suspend(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	u32 value;

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&priv->napi);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	if (device_may_wakeup(priv->device)) {
		/* Stop TX/RX DMA Only */
		value = readl(priv->base + XGMAC_DMA_CONTROL);
		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
		writel(value, priv->base + XGMAC_DMA_CONTROL);

		xgmac_pmt(priv->base, priv->wolopts);
	} else
		xgmac_mac_disable(priv->base);

	return 0;
}

static int xgmac_resume(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	void __iomem *ioaddr = priv->base;

	if (!netif_running(ndev))
		return 0;

	xgmac_pmt(ioaddr, 0);

	/* Enable the MAC and DMA */
	xgmac_mac_enable(ioaddr);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	netif_device_attach(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
#define XGMAC_PM_OPS (&xgmac_pm_ops)
#else
#define XGMAC_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
	.driver.pm = XGMAC_PM_OPS,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");