/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

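/* STMMAC_ALIGN() rounds a length up to the L1 cache line size;
 * TSO_MAX_BUFF_SIZE is the largest payload a single TSO buffer
 * descriptor can address (16KiB - 1).
 */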
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
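/* Turn the eee_timer value (in ms) into an absolute jiffies deadline */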
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses ring mode to manage the tx and rx descriptors,
 * but the user can force chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
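
/* Example (illustrative only): if stmmac is built as a module, the above
 * parameters can be set at load time, e.g.
 * "modprobe stmmac chain_mode=1 eee_timer=2000"; the writable ones are also
 * exposed under /sys/module/stmmac/parameters/ at run time.
 */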

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

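/* Turn a TX coalescing timeout (in usecs) into an absolute jiffies deadline */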
#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* The platform-provided default clk_csr is assumed to be valid in
	 * all cases except the ones handled below. For rates higher than
	 * the IEEE 802.3 specified frequency range we cannot estimate the
	 * proper divider, as the frequency of clk_csr_i is not known, so
	 * the default divider is left unchanged.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

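/* Free TX descriptors, computed from the circular ring indexes; one slot is
 * left unused so that cur_tx == dirty_tx unambiguously means an empty ring.
 */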
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	u32 avail;

	if (priv->dirty_tx > priv->cur_tx)
		avail = priv->dirty_tx - priv->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

	return avail;
}

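/* Number of RX descriptors the hardware has used and the driver still has to
 * refill (i.e. how far dirty_rx lags behind cur_rx around the ring).
 */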
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
	u32 dirty;

	if (priv->dirty_rx <= priv->cur_rx)
		dirty = priv->cur_rx - priv->dirty_rx;
	else
		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks whether the TX path is idle and, if so,
 * enters the LPI state. It is used when EEE is enabled.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits the LPI state and disables EEE. It is
 * called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and we are not already in the LPI state,
 *  the MAC transmitter can be moved into the LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using the PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be
			 * supported at run time (for example because the
			 * link partner caps have changed); in that case
			 * the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor,
 * performs some sanity checks and then passes it to the stack.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
	}
}

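/* A minimal userspace sketch (illustrative only, error handling omitted)
 * showing how the standard SIOCSHWTSTAMP ioctl reaches the handler below:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * sock_fd is any open AF_INET socket; "eth0" stands for the stmmac netdev.
 */
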
/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative errno on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	} else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Calculate the default addend value:
		 * addend = 2^32 / freq_div_ratio,
		 * where freq_div_ratio = 1e9 ns / sec_inc
		 */
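		/* Worked example with illustrative values only: if sec_inc
		 * were 20ns and clk_ptp_rate 100MHz, then
		 * freq_div_ratio = 1e9 / 20 = 5 * 10^7 and
		 * addend = (5 * 10^7 << 32) / 10^8 = 2^31 = 0x80000000.
		 */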
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching between different (EEE-capable) networks.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4)
					ctrl &= ~priv->hw->link.port;
				break;
			case 100:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl |= priv->hw->link.speed;
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			case 10:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook in
		 * case a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS, the Physical
 * Coding Sublayer interface that can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop advertising 1000BASE capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have a UP/DOWN/UP transition.
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	void *head_rx, *head_tx;

	if (priv->extend_desc) {
		head_rx = (void *)priv->dma_erx;
		head_tx = (void *)priv->dma_etx;
	} else {
		head_rx = (void *)priv->dma_rx;
		head_tx = (void *)priv->dma_tx;
	}

	/* Display Rx ring */
	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	/* Display Tx ring */
	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

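/* Map an MTU to the smallest fixed DMA buffer class (default/2KiB/4KiB/8KiB)
 * that can hold a full frame.
 */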
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (i = 0; i < DMA_RX_SIZE; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags);
		if (ret)
			goto err_init_rx_buffers;

		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
			  (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 1);
			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 1);
		} else {
			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 0);
			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;

		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		} else {
			p->des2 = 0;
		}

		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
		priv->tx_skbuff_dma[i].len = 0;
		priv->tx_skbuff_dma[i].last_segment = false;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;

err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++) {
		if (priv->tx_skbuff_dma[i].buf) {
			if (priv->tx_skbuff_dma[i].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[i].buf,
					       priv->tx_skbuff_dma[i].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i].buf,
						 priv->tx_skbuff_dma[i].len,
						 DMA_TO_DEVICE);
		}

		if (priv->tx_skbuff[i]) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i].buf = 0;
			priv->tx_skbuff_dma[i].map_as_page = false;
		}
	}
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow a zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = -ENOMEM;

	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		return -ENOMEM;

	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
					    sizeof(*priv->tx_skbuff_dma),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (priv->extend_desc) {
		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						    sizeof(struct dma_extended_desc),
						    &priv->dma_rx_phy,
						    GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						    sizeof(struct dma_extended_desc),
						    &priv->dma_tx_phy,
						    GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	return 0;

err_dma:
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
	return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  DMA_TX_SIZE * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  DMA_RX_SIZE * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, DMA_TX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, DMA_RX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	int rx_count = priv->dma_cap.number_rx_queues;
	int queue = 0;

	/* If GMAC does not have multiple queues, then this is not necessary */
	if (rx_count == 1)
		return;

	/* If the core is synthesized with multiple rx queues / multiple
	 * dma channels, then rx queues will be disabled by default.
	 * For now only rx queue 0 is enabled.
	 */
	priv->hw->mac->rx_queue_enable(priv->hw, queue);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
					rxfifosz);
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
					rxfifosz);
	}
}
1301
1302 /**
1303  * stmmac_tx_clean - to manage the transmission completion
1304  * @priv: driver private structure
1305  * Description: it reclaims the transmit resources after transmission completes.
1306  */
1307 static void stmmac_tx_clean(struct stmmac_priv *priv)
1308 {
1309         unsigned int bytes_compl = 0, pkts_compl = 0;
1310         unsigned int entry = priv->dirty_tx;
1311
1312         netif_tx_lock(priv->dev);
1313
1314         priv->xstats.tx_clean++;
1315
1316         while (entry != priv->cur_tx) {
1317                 struct sk_buff *skb = priv->tx_skbuff[entry];
1318                 struct dma_desc *p;
1319                 int status;
1320
1321                 if (priv->extend_desc)
1322                         p = (struct dma_desc *)(priv->dma_etx + entry);
1323                 else
1324                         p = priv->dma_tx + entry;
1325
1326                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1327                                                       &priv->xstats, p,
1328                                                       priv->ioaddr);
1329                 /* Check if the descriptor is owned by the DMA */
1330                 if (unlikely(status & tx_dma_own))
1331                         break;
1332
1333                 /* Just consider the last segment and ... */
1334                 if (likely(!(status & tx_not_ls))) {
1335                         /* ... verify the status error condition */
1336                         if (unlikely(status & tx_err)) {
1337                                 priv->dev->stats.tx_errors++;
1338                         } else {
1339                                 priv->dev->stats.tx_packets++;
1340                                 priv->xstats.tx_pkt_n++;
1341                         }
1342                         stmmac_get_tx_hwtstamp(priv, p, skb);
1343                 }
1344
1345                 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1346                         if (priv->tx_skbuff_dma[entry].map_as_page)
1347                                 dma_unmap_page(priv->device,
1348                                                priv->tx_skbuff_dma[entry].buf,
1349                                                priv->tx_skbuff_dma[entry].len,
1350                                                DMA_TO_DEVICE);
1351                         else
1352                                 dma_unmap_single(priv->device,
1353                                                  priv->tx_skbuff_dma[entry].buf,
1354                                                  priv->tx_skbuff_dma[entry].len,
1355                                                  DMA_TO_DEVICE);
1356                         priv->tx_skbuff_dma[entry].buf = 0;
1357                         priv->tx_skbuff_dma[entry].len = 0;
1358                         priv->tx_skbuff_dma[entry].map_as_page = false;
1359                 }
1360
1361                 if (priv->hw->mode->clean_desc3)
1362                         priv->hw->mode->clean_desc3(priv, p);
1363
1364                 priv->tx_skbuff_dma[entry].last_segment = false;
1365                 priv->tx_skbuff_dma[entry].is_jumbo = false;
1366
1367                 if (likely(skb != NULL)) {
1368                         pkts_compl++;
1369                         bytes_compl += skb->len;
1370                         dev_consume_skb_any(skb);
1371                         priv->tx_skbuff[entry] = NULL;
1372                 }
1373
1374                 priv->hw->desc->release_tx_desc(p, priv->mode);
1375
1376                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1377         }
1378         priv->dirty_tx = entry;
1379
1380         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1381
1382         if (unlikely(netif_queue_stopped(priv->dev) &&
1383             stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1384                 netif_dbg(priv, tx_done, priv->dev,
1385                           "%s: restart transmit\n", __func__);
1386                 netif_wake_queue(priv->dev);
1387         }
1388
1389         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1390                 stmmac_enable_eee_mode(priv);
1391                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1392         }
1393         netif_tx_unlock(priv->dev);
1394 }
1395
1396 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1397 {
1398         priv->hw->dma->enable_dma_irq(priv->ioaddr);
1399 }
1400
1401 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1402 {
1403         priv->hw->dma->disable_dma_irq(priv->ioaddr);
1404 }
1405
1406 /**
1407  * stmmac_tx_err - to manage the tx error
1408  * @priv: driver private structure
1409  * Description: it cleans the descriptors and restarts the transmission
1410  * in case of transmission errors.
1411  */
1412 static void stmmac_tx_err(struct stmmac_priv *priv)
1413 {
1414         int i;

1415         netif_stop_queue(priv->dev);
1416
1417         priv->hw->dma->stop_tx(priv->ioaddr);
1418         dma_free_tx_skbufs(priv);
1419         for (i = 0; i < DMA_TX_SIZE; i++)
1420                 if (priv->extend_desc)
1421                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1422                                                      priv->mode,
1423                                                      (i == DMA_TX_SIZE - 1));
1424                 else
1425                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1426                                                      priv->mode,
1427                                                      (i == DMA_TX_SIZE - 1));
1428         priv->dirty_tx = 0;
1429         priv->cur_tx = 0;
1430         netdev_reset_queue(priv->dev);
1431         priv->hw->dma->start_tx(priv->ioaddr);
1432
1433         priv->dev->stats.tx_errors++;
1434         netif_wake_queue(priv->dev);
1435 }
1436
1437 /**
1438  * stmmac_dma_interrupt - DMA ISR
1439  * @priv: driver private structure
1440  * Description: this is the DMA ISR. It is called by the main ISR.
1441  * It calls the dwmac dma routine and schedules the NAPI poll method
1442  * when there is work that can be done.
1443  */
1444 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1445 {
1446         int status;
1447         int rxfifosz = priv->plat->rx_fifo_size;
1448
1449         status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1450         if (likely((status & handle_rx)) || (status & handle_tx)) {
1451                 if (likely(napi_schedule_prep(&priv->napi))) {
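                             /* Mask the DMA interrupts while NAPI is polling;
                              * they are re-enabled from stmmac_poll() once the
                              * ring has been processed within budget.
                              */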
1452                         stmmac_disable_dma_irq(priv);
1453                         __napi_schedule(&priv->napi);
1454                 }
1455         }
1456         if (unlikely(status & tx_hard_error_bump_tc)) {
1457                 /* Try to bump up the dma threshold on this failure */
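                     /* Starting from the default of 64, tc is raised in steps
                      * of 64; once it has gone past 256, no further bumps are
                      * attempted.
                      */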
1458                 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1459                     (tc <= 256)) {
1460                         tc += 64;
1461                         if (priv->plat->force_thresh_dma_mode)
1462                                 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1463                                                         rxfifosz);
1464                         else
1465                                 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1466                                                         SF_DMA_MODE, rxfifosz);
1467                         priv->xstats.threshold = tc;
1468                 }
1469         } else if (unlikely(status == tx_hard_error))
1470                 stmmac_tx_err(priv);
1471 }
1472
1473 /**
1474  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
1475  * @priv: driver private structure
1476  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
1477  */
1478 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1479 {
1480         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1481                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1482
1483         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1484                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1485                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1486         } else {
1487                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1488                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1489         }
1490
1491         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1492
1493         if (priv->dma_cap.rmon) {
1494                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1495                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1496         } else
1497                 netdev_info(priv->dev, "No MAC Management Counters available\n");
1498 }
1499
1500 /**
1501  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
1502  * @priv: driver private structure
1503  * Description: select the Enhanced/Alternate or Normal descriptors.
1504  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1505  * supported by the HW capability register.
1506  */
1507 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1508 {
1509         if (priv->plat->enh_desc) {
1510                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1511
1512                 /* GMAC older than 3.50 has no extended descriptors */
1513                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1514                         dev_info(priv->device, "Enabled extended descriptors\n");
1515                         priv->extend_desc = 1;
1516                 } else
1517                         dev_warn(priv->device, "Extended descriptors not supported\n");
1518
1519                 priv->hw->desc = &enh_desc_ops;
1520         } else {
1521                 dev_info(priv->device, "Normal descriptors\n");
1522                 priv->hw->desc = &ndesc_ops;
1523         }
1524 }
1525
1526 /**
1527  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1528  * @priv: driver private structure
1529  * Description:
1530  *  new GMAC chip generations have a register to indicate the
1531  *  presence of the optional features/functions.
1532  *  It can also be used to override values passed through the
1533  *  platform code, which is necessary for old MAC10/100 and GMAC chips.
1534  */
1535 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1536 {
1537         u32 ret = 0;
1538
1539         if (priv->hw->dma->get_hw_feature) {
1540                 priv->hw->dma->get_hw_feature(priv->ioaddr,
1541                                               &priv->dma_cap);
1542                 ret = 1;
1543         }
1544
1545         return ret;
1546 }
1547
1548 /**
1549  * stmmac_check_ether_addr - check if the MAC addr is valid
1550  * @priv: driver private structure
1551  * Description:
1552  * it verifies that the MAC address is valid; in case of failure it
1553  * generates a random MAC address.
1554  */
1555 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1556 {
1557         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1558                 priv->hw->mac->get_umac_addr(priv->hw,
1559                                              priv->dev->dev_addr, 0);
1560                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1561                         eth_hw_addr_random(priv->dev);
1562                 netdev_info(priv->dev, "device MAC address %pM\n",
1563                             priv->dev->dev_addr);
1564         }
1565 }
1566
1567 /**
1568  * stmmac_init_dma_engine - DMA init.
1569  * @priv: driver private structure
1570  * Description:
1571  * It inits the DMA by invoking the specific MAC/GMAC callback.
1572  * Some DMA parameters can be passed from the platform;
1573  * if they are not passed, a default is kept for the MAC or GMAC.
1574  */
1575 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1576 {
1577         int atds = 0;
1578         int ret = 0;
1579
1580         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1581                 dev_err(priv->device, "Invalid DMA configuration\n");
1582                 return -EINVAL;
1583         }
1584
1585         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1586                 atds = 1;
1587
1588         ret = priv->hw->dma->reset(priv->ioaddr);
1589         if (ret) {
1590                 dev_err(priv->device, "Failed to reset the dma\n");
1591                 return ret;
1592         }
1593
1594         priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1595                             priv->dma_tx_phy, priv->dma_rx_phy, atds);
1596
1597         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1598                 priv->rx_tail_addr = priv->dma_rx_phy +
1599                             (DMA_RX_SIZE * sizeof(struct dma_desc));
1600                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1601                                                STMMAC_CHAN0);
1602
1603                 priv->tx_tail_addr = priv->dma_tx_phy +
1604                             (DMA_TX_SIZE * sizeof(struct dma_desc));
1605                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1606                                                STMMAC_CHAN0);
1607         }
1608
1609         if (priv->plat->axi && priv->hw->dma->axi)
1610                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1611
1612         return ret;
1613 }
1614
1615 /**
1616  * stmmac_tx_timer - mitigation sw timer for tx.
1617  * @data: data pointer
1618  * Description:
1619  * This is the timer handler used to directly invoke stmmac_tx_clean.
1620  */
1621 static void stmmac_tx_timer(unsigned long data)
1622 {
1623         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1624
1625         stmmac_tx_clean(priv);
1626 }
1627
1628 /**
1629  * stmmac_init_tx_coalesce - init tx mitigation options.
1630  * @priv: driver private structure
1631  * Description:
1632  * This inits the transmit coalesce parameters: i.e. timer rate,
1633  * timer handler and default threshold used for enabling the
1634  * interrupt on completion bit.
1635  */
1636 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1637 {
1638         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1639         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1640         init_timer(&priv->txtimer);
1641         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1642         priv->txtimer.data = (unsigned long)priv;
1643         priv->txtimer.function = stmmac_tx_timer;
1644         add_timer(&priv->txtimer);
1645 }
1646
1647 /**
1648  * stmmac_hw_setup - setup mac in a usable state.
1649  *  @dev : pointer to the device structure.
1650  *  Description:
1651  *  this is the main function to set up the HW in a usable state: the
1652  *  dma engine is reset, the core registers are configured (e.g. AXI,
1653  *  checksum features, timers) and the DMA is made ready to start
1654  *  receiving and transmitting.
1655  *  Return value:
1656  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1657  *  file on failure.
1658  */
1659 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1660 {
1661         struct stmmac_priv *priv = netdev_priv(dev);
1662         int ret;
1663
1664         /* DMA initialization and SW reset */
1665         ret = stmmac_init_dma_engine(priv);
1666         if (ret < 0) {
1667                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1668                            __func__);
1669                 return ret;
1670         }
1671
1672         /* Copy the MAC addr into the HW  */
1673         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1674
1675         /* PS and related bits will be programmed according to the speed */
1676         if (priv->hw->pcs) {
1677                 int speed = priv->plat->mac_port_sel_speed;
1678
1679                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1680                     (speed == SPEED_1000)) {
1681                         priv->hw->ps = speed;
1682                 } else {
1683                         dev_warn(priv->device, "invalid port speed\n");
1684                         priv->hw->ps = 0;
1685                 }
1686         }
1687
1688         /* Initialize the MAC Core */
1689         priv->hw->mac->core_init(priv->hw, dev->mtu);
1690
1691         /* Initialize MAC RX Queues */
1692         if (priv->hw->mac->rx_queue_enable)
1693                 stmmac_mac_enable_rx_queues(priv);
1694
1695         ret = priv->hw->mac->rx_ipc(priv->hw);
1696         if (!ret) {
1697                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
1698                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1699                 priv->hw->rx_csum = 0;
1700         }
1701
1702         /* Enable the MAC Rx/Tx */
1703         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1704                 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1705         else
1706                 stmmac_set_mac(priv->ioaddr, true);
1707
1708         /* Set the HW DMA mode and the COE */
1709         stmmac_dma_operation_mode(priv);
1710
1711         stmmac_mmc_setup(priv);
1712
1713         if (init_ptp) {
1714                 ret = stmmac_init_ptp(priv);
1715                 if (ret == -EOPNOTSUPP)
1716                         netdev_warn(priv->dev, "PTP not supported by HW\n");
1717                 else if (ret)
1718                         netdev_warn(priv->dev, "PTP init failed\n");
1719         }
1720
1721 #ifdef CONFIG_DEBUG_FS
1722         ret = stmmac_init_fs(dev);
1723         if (ret < 0)
1724                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1725                             __func__);
1726 #endif
1727         /* Start the ball rolling... */
1728         netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
1729         priv->hw->dma->start_tx(priv->ioaddr);
1730         priv->hw->dma->start_rx(priv->ioaddr);
1731
1732         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1733
1734         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1735                 priv->rx_riwt = MAX_DMA_RIWT;
1736                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1737         }
1738
1739         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1740                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1741
1742         /*  set TX ring length */
1743         if (priv->hw->dma->set_tx_ring_len)
1744                 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1745                                                (DMA_TX_SIZE - 1));
1746         /*  set RX ring length */
1747         if (priv->hw->dma->set_rx_ring_len)
1748                 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1749                                                (DMA_RX_SIZE - 1));
1750         /* Enable TSO */
1751         if (priv->tso)
1752                 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1753
1754         return 0;
1755 }
1756
1757 /**
1758  *  stmmac_open - open entry point of the driver
1759  *  @dev : pointer to the device structure.
1760  *  Description:
1761  *  This function is the open entry point of the driver.
1762  *  Return value:
1763  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1764  *  file on failure.
1765  */
1766 static int stmmac_open(struct net_device *dev)
1767 {
1768         struct stmmac_priv *priv = netdev_priv(dev);
1769         int ret;
1770
1771         stmmac_check_ether_addr(priv);
1772
1773         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1774             priv->hw->pcs != STMMAC_PCS_TBI &&
1775             priv->hw->pcs != STMMAC_PCS_RTBI) {
1776                 ret = stmmac_init_phy(dev);
1777                 if (ret) {
1778                         netdev_err(priv->dev,
1779                                    "%s: Cannot attach to PHY (error: %d)\n",
1780                                    __func__, ret);
1781                         return ret;
1782                 }
1783         }
1784
1785         /* Extra statistics */
1786         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1787         priv->xstats.threshold = tc;
1788
1789         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1790         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1791
1792         ret = alloc_dma_desc_resources(priv);
1793         if (ret < 0) {
1794                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1795                            __func__);
1796                 goto dma_desc_error;
1797         }
1798
1799         ret = init_dma_desc_rings(dev, GFP_KERNEL);
1800         if (ret < 0) {
1801                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1802                            __func__);
1803                 goto init_error;
1804         }
1805
1806         ret = stmmac_hw_setup(dev, true);
1807         if (ret < 0) {
1808                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
1809                 goto init_error;
1810         }
1811
1812         stmmac_init_tx_coalesce(priv);
1813
1814         if (dev->phydev)
1815                 phy_start(dev->phydev);
1816
1817         /* Request the IRQ lines */
1818         ret = request_irq(dev->irq, stmmac_interrupt,
1819                           IRQF_SHARED, dev->name, dev);
1820         if (unlikely(ret < 0)) {
1821                 netdev_err(priv->dev,
1822                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1823                            __func__, dev->irq, ret);
1824                 goto init_error;
1825         }
1826
1827         /* Request the Wake IRQ in case another line is used for WoL */
1828         if (priv->wol_irq != dev->irq) {
1829                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1830                                   IRQF_SHARED, dev->name, dev);
1831                 if (unlikely(ret < 0)) {
1832                         netdev_err(priv->dev,
1833                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1834                                    __func__, priv->wol_irq, ret);
1835                         goto wolirq_error;
1836                 }
1837         }
1838
1839         /* Request the LPI IRQ in case a separate line is used */
1840         if (priv->lpi_irq > 0) {
1841                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1842                                   dev->name, dev);
1843                 if (unlikely(ret < 0)) {
1844                         netdev_err(priv->dev,
1845                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1846                                    __func__, priv->lpi_irq, ret);
1847                         goto lpiirq_error;
1848                 }
1849         }
1850
1851         napi_enable(&priv->napi);
1852         netif_start_queue(dev);
1853
1854         return 0;
1855
1856 lpiirq_error:
1857         if (priv->wol_irq != dev->irq)
1858                 free_irq(priv->wol_irq, dev);
1859 wolirq_error:
1860         free_irq(dev->irq, dev);
1861
1862 init_error:
1863         free_dma_desc_resources(priv);
1864 dma_desc_error:
1865         if (dev->phydev)
1866                 phy_disconnect(dev->phydev);
1867
1868         return ret;
1869 }
1870
1871 /**
1872  *  stmmac_release - close entry point of the driver
1873  *  @dev : device pointer.
1874  *  Description:
1875  *  This is the stop entry point of the driver.
1876  */
1877 static int stmmac_release(struct net_device *dev)
1878 {
1879         struct stmmac_priv *priv = netdev_priv(dev);
1880
1881         if (priv->eee_enabled)
1882                 del_timer_sync(&priv->eee_ctrl_timer);
1883
1884         /* Stop and disconnect the PHY */
1885         if (dev->phydev) {
1886                 phy_stop(dev->phydev);
1887                 phy_disconnect(dev->phydev);
1888         }
1889
1890         netif_stop_queue(dev);
1891
1892         napi_disable(&priv->napi);
1893
1894         del_timer_sync(&priv->txtimer);
1895
1896         /* Free the IRQ lines */
1897         free_irq(dev->irq, dev);
1898         if (priv->wol_irq != dev->irq)
1899                 free_irq(priv->wol_irq, dev);
1900         if (priv->lpi_irq > 0)
1901                 free_irq(priv->lpi_irq, dev);
1902
1903         /* Stop TX/RX DMA and clear the descriptors */
1904         priv->hw->dma->stop_tx(priv->ioaddr);
1905         priv->hw->dma->stop_rx(priv->ioaddr);
1906
1907         /* Release and free the Rx/Tx resources */
1908         free_dma_desc_resources(priv);
1909
1910         /* Disable the MAC Rx/Tx */
1911         stmmac_set_mac(priv->ioaddr, false);
1912
1913         netif_carrier_off(dev);
1914
1915 #ifdef CONFIG_DEBUG_FS
1916         stmmac_exit_fs(dev);
1917 #endif
1918
1919         stmmac_release_ptp(priv);
1920
1921         return 0;
1922 }
1923
1924 /**
1925  *  stmmac_tso_allocator - allocate and fill TSO descriptors
1926  *  @priv: driver private structure
1927  *  @des: buffer start address
1928  *  @total_len: total length to fill in descriptors
1929  *  @last_segment: condition for the last descriptor
1930  *  Description:
1931  *  This function fills descriptors, taking new ones as needed, according
1932  *  to the buffer length to fill.
1933  */
1934 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1935                                  int total_len, bool last_segment)
1936 {
1937         struct dma_desc *desc;
1938         int tmp_len;
1939         u32 buff_size;
1940
1941         tmp_len = total_len;
1942
1943         while (tmp_len > 0) {
1944                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1945                 desc = priv->dma_tx + priv->cur_tx;
1946
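                     /* Each pass maps at most TSO_MAX_BUFF_SIZE bytes, starting
                      * right after what the previous descriptors consumed
                      * (e.g. a 32 KiB buffer takes three passes:
                      * 16383 + 16383 + 2 bytes).
                      */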
1947                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
1948                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1949                             TSO_MAX_BUFF_SIZE : tmp_len;
1950
1951                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1952                         0, 1,
1953                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1954                         0, 0);
1955
1956                 tmp_len -= TSO_MAX_BUFF_SIZE;
1957         }
1958 }
1959
1960 /**
1961  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
1962  *  @skb : the socket buffer
1963  *  @dev : device pointer
1964  *  Description: this is the transmit function that is called on TSO frames
1965  *  (support available on GMAC4 and newer chips).
1966  *  The diagram below shows the ring programming in case of TSO frames:
1967  *
1968  *  First Descriptor
1969  *   --------
1970  *   | DES0 |---> buffer1 = L2/L3/L4 header
1971  *   | DES1 |---> TCP Payload (can continue on next descr...)
1972  *   | DES2 |---> buffer 1 and 2 len
1973  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
1974  *   --------
1975  *      |
1976  *     ...
1977  *      |
1978  *   --------
1979  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
1980  *   | DES1 | --|
1981  *   | DES2 | --> buffer 1 and 2 len
1982  *   | DES3 |
1983  *   --------
1984  *
1985  * The MSS is usually fixed while TSO is enabled, so the TDES3 context
      * field only needs to be programmed when the MSS changes.
1986  */
1987 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1988 {
1989         u32 pay_len, mss;
1990         int tmp_pay_len = 0;
1991         struct stmmac_priv *priv = netdev_priv(dev);
1992         int nfrags = skb_shinfo(skb)->nr_frags;
1993         unsigned int first_entry, des;
1994         struct dma_desc *desc, *first, *mss_desc = NULL;
1995         u8 proto_hdr_len;
1996         int i;
1997
1998         /* Compute header lengths */
1999         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2000
2001         /* Desc availability check based on this worst-case estimate
              * (one descriptor per TSO_MAX_BUFF_SIZE chunk of the linear
              * payload, plus one for the headers) should be safe enough.
              */
2002         if (unlikely(stmmac_tx_avail(priv) <
2003                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2004                 if (!netif_queue_stopped(dev)) {
2005                         netif_stop_queue(dev);
2006                         /* This is a hard error, log it. */
2007                         netdev_err(priv->dev,
2008                                    "%s: Tx Ring full when queue awake\n",
2009                                    __func__);
2010                 }
2011                 return NETDEV_TX_BUSY;
2012         }
2013
2014         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2015
2016         mss = skb_shinfo(skb)->gso_size;
2017
2018         /* set new MSS value if needed */
2019         if (mss != priv->mss) {
2020                 mss_desc = priv->dma_tx + priv->cur_tx;
2021                 priv->hw->desc->set_mss(mss_desc, mss);
2022                 priv->mss = mss;
2023                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2024         }
2025
2026         if (netif_msg_tx_queued(priv)) {
2027                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2028                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2029                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2030                         skb->data_len);
2031         }
2032
2033         first_entry = priv->cur_tx;
2034
2035         desc = priv->dma_tx + first_entry;
2036         first = desc;
2037
2038         /* first descriptor: fill Headers on Buf1 */
2039         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2040                              DMA_TO_DEVICE);
2041         if (dma_mapping_error(priv->device, des))
2042                 goto dma_map_err;
2043
2044         priv->tx_skbuff_dma[first_entry].buf = des;
2045         priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2046         priv->tx_skbuff[first_entry] = skb;
2047
2048         first->des0 = cpu_to_le32(des);
2049
2050         /* Fill start of payload in buff2 of first descriptor */
2051         if (pay_len)
2052                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2053
2054         /* If needed take extra descriptors to fill the remaining payload */
2055         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2056
2057         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2058
2059         /* Prepare fragments */
2060         for (i = 0; i < nfrags; i++) {
2061                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2062
2063                 des = skb_frag_dma_map(priv->device, frag, 0,
2064                                        skb_frag_size(frag),
2065                                        DMA_TO_DEVICE);
                     /* as for the first descriptor, bail out on mapping error */
                     if (dma_mapping_error(priv->device, des))
                             goto dma_map_err;
2066
2067                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2068                                      (i == nfrags - 1));
2069
2070                 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2071                 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2072                 priv->tx_skbuff[priv->cur_tx] = NULL;
2073                 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2074         }
2075
2076         priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2077
2078         priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2079
2080         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2081                 netif_dbg(priv, hw, priv->dev, "%s: stopping transmit queue\n",
2082                           __func__);
2083                 netif_stop_queue(dev);
2084         }
2085
2086         dev->stats.tx_bytes += skb->len;
2087         priv->xstats.tx_tso_frames++;
2088         priv->xstats.tx_tso_nfrags += nfrags;
2089
2090         /* Manage tx mitigation */
2091         priv->tx_count_frames += nfrags + 1;
2092         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2093                 mod_timer(&priv->txtimer,
2094                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2095         } else {
2096                 priv->tx_count_frames = 0;
2097                 priv->hw->desc->set_tx_ic(desc);
2098                 priv->xstats.tx_set_ic_bit++;
2099         }
2100
2101         if (!priv->hwts_tx_en)
2102                 skb_tx_timestamp(skb);
2103
2104         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2105                      priv->hwts_tx_en)) {
2106                 /* declare that device is doing timestamping */
2107                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2108                 priv->hw->desc->enable_tx_timestamp(first);
2109         }
2110
2111         /* Complete the first descriptor before granting the DMA */
2112         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2113                         proto_hdr_len,
2114                         pay_len,
2115                         1, priv->tx_skbuff_dma[first_entry].last_segment,
2116                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2117
2118         /* If context desc is used to change MSS */
2119         if (mss_desc)
2120                 priv->hw->desc->set_tx_owner(mss_desc);
2121
2122         /* The own bit must be the last setting done when preparing the
2123          * descriptor; a barrier is then needed to make sure all is
2124          * coherent before handing control to the DMA engine.
2125          */
2126         dma_wmb();
2127
2128         if (netif_msg_pktdata(priv)) {
2129                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2130                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2131                         priv->cur_tx, first, nfrags);
2132
2133                 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2134                                              0);
2135
2136                 pr_info(">>> frame to be transmitted: ");
2137                 print_pkt(skb->data, skb_headlen(skb));
2138         }
2139
2140         netdev_sent_queue(dev, skb->len);
2141
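             /* Kick the DMA by moving the tail pointer past the last prepared
              * descriptor.
              */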
2142         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2143                                        STMMAC_CHAN0);
2144
2145         return NETDEV_TX_OK;
2146
2147 dma_map_err:
2148         dev_err(priv->device, "Tx dma map failed\n");
2149         dev_kfree_skb(skb);
2150         priv->dev->stats.tx_dropped++;
2151         return NETDEV_TX_OK;
2152 }
2153
2154 /**
2155  *  stmmac_xmit - Tx entry point of the driver
2156  *  @skb : the socket buffer
2157  *  @dev : device pointer
2158  *  Description : this is the tx entry point of the driver.
2159  *  It programs the chain or the ring and supports oversized frames
2160  *  and SG feature.
2161  */
2162 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2163 {
2164         struct stmmac_priv *priv = netdev_priv(dev);
2165         unsigned int nopaged_len = skb_headlen(skb);
2166         int i, csum_insertion = 0, is_jumbo = 0;
2167         int nfrags = skb_shinfo(skb)->nr_frags;
2168         unsigned int entry, first_entry;
2169         struct dma_desc *desc, *first;
2170         unsigned int enh_desc;
2171         unsigned int des;
2172
2173         /* Manage oversized TCP frames for GMAC4 device */
2174         if (skb_is_gso(skb) && priv->tso) {
2175                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2176                         return stmmac_tso_xmit(skb, dev);
2177         }
2178
2179         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2180                 if (!netif_queue_stopped(dev)) {
2181                         netif_stop_queue(dev);
2182                         /* This is a hard error, log it. */
2183                         netdev_err(priv->dev,
2184                                    "%s: Tx Ring full when queue awake\n",
2185                                    __func__);
2186                 }
2187                 return NETDEV_TX_BUSY;
2188         }
2189
2190         if (priv->tx_path_in_lpi_mode)
2191                 stmmac_disable_eee_mode(priv);
2192
2193         entry = priv->cur_tx;
2194         first_entry = entry;
2195
2196         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2197
2198         if (likely(priv->extend_desc))
2199                 desc = (struct dma_desc *)(priv->dma_etx + entry);
2200         else
2201                 desc = priv->dma_tx + entry;
2202
2203         first = desc;
2204
2205         priv->tx_skbuff[first_entry] = skb;
2206
2207         enh_desc = priv->plat->enh_desc;
2208         /* To program the descriptors according to the size of the frame */
2209         if (enh_desc)
2210                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2211
2212         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2213                                          DWMAC_CORE_4_00)) {
                     /* jumbo_frm() returns a negative errno on failure, so
                      * keep the result signed: "entry" is unsigned, and an
                      * "entry < 0" test could never be true.
                      */
                     int jumbo_entry = priv->hw->mode->jumbo_frm(priv, skb,
                                                                 csum_insertion);

                     if (unlikely(jumbo_entry < 0))
                             goto dma_map_err;
                     entry = jumbo_entry;
2217         }
2218
2219         for (i = 0; i < nfrags; i++) {
2220                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2221                 int len = skb_frag_size(frag);
2222                 bool last_segment = (i == (nfrags - 1));
2223
2224                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2225
2226                 if (likely(priv->extend_desc))
2227                         desc = (struct dma_desc *)(priv->dma_etx + entry);
2228                 else
2229                         desc = priv->dma_tx + entry;
2230
2231                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2232                                        DMA_TO_DEVICE);
2233                 if (dma_mapping_error(priv->device, des))
2234                         goto dma_map_err; /* should reuse desc w/o issues */
2235
2236                 priv->tx_skbuff[entry] = NULL;
2237
2238                 priv->tx_skbuff_dma[entry].buf = des;
2239                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2240                         desc->des0 = cpu_to_le32(des);
2241                 else
2242                         desc->des2 = cpu_to_le32(des);
2243
2244                 priv->tx_skbuff_dma[entry].map_as_page = true;
2245                 priv->tx_skbuff_dma[entry].len = len;
2246                 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2247
2248                 /* Prepare the descriptor and set the own bit too */
2249                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2250                                                 priv->mode, 1, last_segment);
2251         }
2252
2253         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2254
2255         priv->cur_tx = entry;
2256
2257         if (netif_msg_pktdata(priv)) {
2258                 void *tx_head;
2259
2260                 netdev_dbg(priv->dev,
2261                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2262                            __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2263                            entry, first, nfrags);
2264
2265                 if (priv->extend_desc)
2266                         tx_head = (void *)priv->dma_etx;
2267                 else
2268                         tx_head = (void *)priv->dma_tx;
2269
2270                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2271
2272                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2273                 print_pkt(skb->data, skb->len);
2274         }
2275
2276         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2277                 netif_dbg(priv, hw, priv->dev, "%s: stopping transmit queue\n",
2278                           __func__);
2279                 netif_stop_queue(dev);
2280         }
2281
2282         dev->stats.tx_bytes += skb->len;
2283
2284         /* According to the coalesce parameter, the IC bit for the latest
2285          * segment is reset and the timer is re-started to clean the tx
2286          * status. This approach takes care of the fragments: desc is the
2287          * first element in case of no SG.
2288          */
2289         priv->tx_count_frames += nfrags + 1;
2290         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2291                 mod_timer(&priv->txtimer,
2292                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2293         } else {
2294                 priv->tx_count_frames = 0;
2295                 priv->hw->desc->set_tx_ic(desc);
2296                 priv->xstats.tx_set_ic_bit++;
2297         }
2298
2299         if (!priv->hwts_tx_en)
2300                 skb_tx_timestamp(skb);
2301
2302         /* Ready to fill the first descriptor and set the OWN bit w/o any
2303          * problems because all the descriptors are actually ready to be
2304          * passed to the DMA engine.
2305          */
2306         if (likely(!is_jumbo)) {
2307                 bool last_segment = (nfrags == 0);
2308
2309                 des = dma_map_single(priv->device, skb->data,
2310                                      nopaged_len, DMA_TO_DEVICE);
2311                 if (dma_mapping_error(priv->device, des))
2312                         goto dma_map_err;
2313
2314                 priv->tx_skbuff_dma[first_entry].buf = des;
2315                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2316                         first->des0 = cpu_to_le32(des);
2317                 else
2318                         first->des2 = cpu_to_le32(des);
2319
2320                 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2321                 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2322
2323                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2324                              priv->hwts_tx_en)) {
2325                         /* declare that device is doing timestamping */
2326                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2327                         priv->hw->desc->enable_tx_timestamp(first);
2328                 }
2329
2330                 /* Prepare the first descriptor setting the OWN bit too */
2331                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2332                                                 csum_insertion, priv->mode, 1,
2333                                                 last_segment);
2334
2335                 /* The own bit must be the last setting done when preparing
2336                  * the descriptor; a barrier is then needed to make sure all
2337                  * is coherent before handing control to the DMA engine.
2338                  */
2339                 dma_wmb();
2340         }
2341
2342         netdev_sent_queue(dev, skb->len);
2343
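             /* Older cores are kicked via a transmit poll demand write, while
              * GMAC4 and newer use the tail pointer instead.
              */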
2344         if (priv->synopsys_id < DWMAC_CORE_4_00)
2345                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2346         else
2347                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2348                                                STMMAC_CHAN0);
2349
2350         return NETDEV_TX_OK;
2351
2352 dma_map_err:
2353         netdev_err(priv->dev, "Tx DMA map failed\n");
2354         dev_kfree_skb(skb);
2355         priv->dev->stats.tx_dropped++;
2356         return NETDEV_TX_OK;
2357 }
2358
2359 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2360 {
2361         struct ethhdr *ehdr;
2362         u16 vlanid;
2363
2364         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2365             NETIF_F_HW_VLAN_CTAG_RX &&
2366             !__vlan_get_tag(skb, &vlanid)) {
2367                 /* pop the vlan tag */
2368                 ehdr = (struct ethhdr *)skb->data;
2369                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2370                 skb_pull(skb, VLAN_HLEN);
2371                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2372         }
2373 }
2374
2375
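 /* Return 1 when recent skb allocation failures (tracked via rx_zeroc_thresh)
  * suggest temporarily avoiding zero-copy and copying received frames into
  * freshly allocated skbs instead.
  */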
2376 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2377 {
2378         if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2379                 return 0;
2380
2381         return 1;
2382 }
2383
2384 /**
2385  * stmmac_rx_refill - refill used skb preallocated buffers
2386  * @priv: driver private structure
2387  * Description: this reallocates the skbs used by the reception process
2388  * that is based on zero-copy.
2389  */
2390 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2391 {
2392         int bfsize = priv->dma_buf_sz;
2393         unsigned int entry = priv->dirty_rx;
2394         int dirty = stmmac_rx_dirty(priv);
2395
2396         while (dirty-- > 0) {
2397                 struct dma_desc *p;
2398
2399                 if (priv->extend_desc)
2400                         p = (struct dma_desc *)(priv->dma_erx + entry);
2401                 else
2402                         p = priv->dma_rx + entry;
2403
2404                 if (likely(priv->rx_skbuff[entry] == NULL)) {
2405                         struct sk_buff *skb;
2406
2407                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2408                         if (unlikely(!skb)) {
2409                                 /* so for a while no zero-copy! */
2410                                 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2411                                 if (unlikely(net_ratelimit()))
2412                                         dev_err(priv->device,
2413                                                 "fail to alloc skb entry %d\n",
2414                                                 entry);
2415                                 break;
2416                         }
2417
2418                         priv->rx_skbuff[entry] = skb;
2419                         priv->rx_skbuff_dma[entry] =
2420                             dma_map_single(priv->device, skb->data, bfsize,
2421                                            DMA_FROM_DEVICE);
2422                         if (dma_mapping_error(priv->device,
2423                                               priv->rx_skbuff_dma[entry])) {
2424                                 netdev_err(priv->dev, "Rx DMA map failed\n");
2425                                 dev_kfree_skb(skb);
2426                                 break;
2427                         }
2428
2429                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2430                                 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2431                                 p->des1 = 0;
2432                         } else {
2433                                 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2434                         }
2435                         if (priv->hw->mode->refill_desc3)
2436                                 priv->hw->mode->refill_desc3(priv, p);
2437
2438                         if (priv->rx_zeroc_thresh > 0)
2439                                 priv->rx_zeroc_thresh--;
2440
2441                         netif_dbg(priv, rx_status, priv->dev,
2442                                   "refill entry #%d\n", entry);
2443                 }
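                     /* Make sure the descriptor fields written above are
                      * visible before ownership is handed back to the DMA
                      * below.
                      */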
2444                 dma_wmb();
2445
2446                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2447                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2448                 else
2449                         priv->hw->desc->set_rx_owner(p);
2450
2451                 dma_wmb();
2452
2453                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2454         }
2455         priv->dirty_rx = entry;
2456 }
2457
2458 /**
2459  * stmmac_rx - manage the receive process
2460  * @priv: driver private structure
2461  * @limit: napi budget.
2462  * Description: this is the function called by the napi poll method.
2463  * It gets all the frames inside the ring.
2464  */
2465 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2466 {
2467         unsigned int entry = priv->cur_rx;
2468         unsigned int next_entry;
2469         unsigned int count = 0;
2470         int coe = priv->hw->rx_csum;
2471
2472         if (netif_msg_rx_status(priv)) {
2473                 void *rx_head;
2474
2475                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2476                 if (priv->extend_desc)
2477                         rx_head = (void *)priv->dma_erx;
2478                 else
2479                         rx_head = (void *)priv->dma_rx;
2480
2481                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2482         }
2483         while (count < limit) {
2484                 int status;
2485                 struct dma_desc *p;
2486                 struct dma_desc *np;
2487
2488                 if (priv->extend_desc)
2489                         p = (struct dma_desc *)(priv->dma_erx + entry);
2490                 else
2491                         p = priv->dma_rx + entry;
2492
2493                 /* read the status of the incoming frame */
2494                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2495                                                    &priv->xstats, p);
2496                 /* check if still owned by the DMA; otherwise go ahead */
2497                 if (unlikely(status & dma_own))
2498                         break;
2499
2500                 count++;
2501
2502                 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2503                 next_entry = priv->cur_rx;
2504
2505                 if (priv->extend_desc)
2506                         np = (struct dma_desc *)(priv->dma_erx + next_entry);
2507                 else
2508                         np = priv->dma_rx + next_entry;
2509
2510                 prefetch(np);
2511
2512                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2513                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2514                                                            &priv->xstats,
2515                                                            priv->dma_erx +
2516                                                            entry);
2517                 if (unlikely(status == discard_frame)) {
2518                         priv->dev->stats.rx_errors++;
2519                         if (priv->hwts_rx_en && !priv->extend_desc) {
2520                                 /* DESC2 & DESC3 will be overwritten by device
2521                                  * with timestamp value, hence reinitialize
2522                                  * them in stmmac_rx_refill() function so that
2523                                  * device can reuse it.
2524                                  */
2525                                 priv->rx_skbuff[entry] = NULL;
2526                                 dma_unmap_single(priv->device,
2527                                                  priv->rx_skbuff_dma[entry],
2528                                                  priv->dma_buf_sz,
2529                                                  DMA_FROM_DEVICE);
2530                         }
2531                 } else {
2532                         struct sk_buff *skb;
2533                         int frame_len;
2534                         unsigned int des;
2535
2536                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2537                                 des = le32_to_cpu(p->des0);
2538                         else
2539                                 des = le32_to_cpu(p->des2);
2540
2541                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2542
2543                         /*  If frame length is greater than skb buffer size
2544                          *  (preallocated during init) then the packet is
2545                          *  ignored
2546                          */
2547                         if (frame_len > priv->dma_buf_sz) {
2548                                 netdev_err(priv->dev,
2549                                            "len %d larger than size (%d)\n",
2550                                            frame_len, priv->dma_buf_sz);
2551                                 priv->dev->stats.rx_length_errors++;
2552                                 break;
2553                         }
2554
2555                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2556                          * Type frames (LLC/LLC-SNAP)
2557                          */
2558                         if (unlikely(status != llc_snap))
2559                                 frame_len -= ETH_FCS_LEN;
2560
2561                         if (netif_msg_rx_status(priv)) {
2562                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2563                                            p, entry, des);
2564                                 if (frame_len > ETH_FRAME_LEN)
2565                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2566                                                    frame_len, status);
2567                         }
2568
2569                         /* Zero-copy is always used for all sizes in the
2570                          * GMAC4 case, because the used descriptors always
2571                          * need to be refilled.
2572                          */
2573                         if (unlikely(!priv->plat->has_gmac4 &&
2574                                      ((frame_len < priv->rx_copybreak) ||
2575                                      stmmac_rx_threshold_count(priv)))) {
2576                                 skb = netdev_alloc_skb_ip_align(priv->dev,
2577                                                                 frame_len);
2578                                 if (unlikely(!skb)) {
2579                                         if (net_ratelimit())
2580                                                 dev_warn(priv->device,
2581                                                          "packet dropped\n");
2582                                         priv->dev->stats.rx_dropped++;
2583                                         break;
2584                                 }
2585
2586                                 dma_sync_single_for_cpu(priv->device,
2587                                                         priv->rx_skbuff_dma
2588                                                         [entry], frame_len,
2589                                                         DMA_FROM_DEVICE);
2590                                 skb_copy_to_linear_data(skb,
2591                                                         priv->
2592                                                         rx_skbuff[entry]->data,
2593                                                         frame_len);
2594
2595                                 skb_put(skb, frame_len);
2596                                 dma_sync_single_for_device(priv->device,
2597                                                            priv->rx_skbuff_dma
2598                                                            [entry], frame_len,
2599                                                            DMA_FROM_DEVICE);
2600                         } else {
2601                                 skb = priv->rx_skbuff[entry];
2602                                 if (unlikely(!skb)) {
2603                                         netdev_err(priv->dev,
2604                                                    "%s: Inconsistent Rx chain\n",
2605                                                    priv->dev->name);
2606                                         priv->dev->stats.rx_dropped++;
2607                                         break;
2608                                 }
2609                                 prefetch(skb->data - NET_IP_ALIGN);
2610                                 priv->rx_skbuff[entry] = NULL;
2611                                 priv->rx_zeroc_thresh++;
2612
2613                                 skb_put(skb, frame_len);
2614                                 dma_unmap_single(priv->device,
2615                                                  priv->rx_skbuff_dma[entry],
2616                                                  priv->dma_buf_sz,
2617                                                  DMA_FROM_DEVICE);
2618                         }
2619
2620                         if (netif_msg_pktdata(priv)) {
2621                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
2622                                            frame_len);
2623                                 print_pkt(skb->data, frame_len);
2624                         }
2625
2626                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
2627
2628                         stmmac_rx_vlan(priv->dev, skb);
2629
2630                         skb->protocol = eth_type_trans(skb, priv->dev);
2631
2632                         if (unlikely(!coe))
2633                                 skb_checksum_none_assert(skb);
2634                         else
2635                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2636
2637                         napi_gro_receive(&priv->napi, skb);
2638
2639                         priv->dev->stats.rx_packets++;
2640                         priv->dev->stats.rx_bytes += frame_len;
2641                 }
2642                 entry = next_entry;
2643         }
2644
2645         stmmac_rx_refill(priv);
2646
2647         priv->xstats.rx_pkt_n += count;
2648
2649         return count;
2650 }
2651
2652 /**
2653  *  stmmac_poll - stmmac poll method (NAPI)
2654  *  @napi : pointer to the napi structure.
2655  *  @budget : maximum number of packets that the current CPU can receive from
2656  *            all interfaces.
2657  *  Description:
2658  *  to process the incoming frames and to reclaim the tx resources.
2659  */
2660 static int stmmac_poll(struct napi_struct *napi, int budget)
2661 {
2662         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2663         int work_done = 0;
2664
2665         priv->xstats.napi_poll++;
2666         stmmac_tx_clean(priv);
2667
2668         work_done = stmmac_rx(priv, budget);
2669         if (work_done < budget) {
2670                 napi_complete_done(napi, work_done);
2671                 stmmac_enable_dma_irq(priv);
2672         }
2673         return work_done;
2674 }
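
/* A minimal sketch of the same NAPI pattern, with hypothetical foo_*
 * hooks standing in for the driver internals: reclaim completed TX
 * work, process at most @budget RX packets, and only re-enable the
 * device interrupt once the poll stays below the budget.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *fp = container_of(napi, struct foo_priv,
 *						   napi);
 *		int work_done;
 *
 *		foo_tx_clean(fp);		// reclaim finished TX slots
 *		work_done = foo_rx(fp, budget);	// bounded RX processing
 *		if (work_done < budget) {
 *			napi_complete_done(napi, work_done);
 *			foo_enable_irq(fp);	// IRQs stay off while polling
 *		}
 *		return work_done;
 *	}
 */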
2675
2676 /**
2677  *  stmmac_tx_timeout
2678  *  @dev : Pointer to net device structure
2679  *  Description: this function is called when a packet transmission fails to
2680  *   complete within a reasonable time. The driver will mark the error in the
2681  *   netdev structure and arrange for the device to be reset to a sane state
2682  *   in order to transmit a new packet.
2683  */
2684 static void stmmac_tx_timeout(struct net_device *dev)
2685 {
2686         struct stmmac_priv *priv = netdev_priv(dev);
2687
2688         /* Clear Tx resources and restart transmitting again */
2689         stmmac_tx_err(priv);
2690 }
2691
2692 /**
2693  *  stmmac_set_rx_mode - entry point for multicast addressing
2694  *  @dev : pointer to the device structure
2695  *  Description:
2696  *  This function is a driver entry point which gets called by the kernel
2697  *  whenever multicast addresses must be enabled/disabled.
2698  *  Return value:
2699  *  void.
2700  */
2701 static void stmmac_set_rx_mode(struct net_device *dev)
2702 {
2703         struct stmmac_priv *priv = netdev_priv(dev);
2704
2705         priv->hw->mac->set_filter(priv->hw, dev);
2706 }
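
/* Illustrative sketch only: a set_filter() implementation typically walks
 * the netdev address lists with the standard helpers. foo_hash_set() and
 * foo_promisc() are hypothetical hardware hooks, not part of this driver.
 *
 *	static void foo_set_filter(struct mac_device_info *hw,
 *				   struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		if (dev->flags & IFF_PROMISC) {
 *			foo_promisc(hw, true);	// pass all frames
 *			return;
 *		}
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hash_set(hw, ha->addr);
 *	}
 */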
2707
2708 /**
2709  *  stmmac_change_mtu - entry point to change MTU size for the device.
2710  *  @dev : device pointer.
2711  *  @new_mtu : the new MTU size for the device.
2712  *  Description: the Maximum Transmission Unit (MTU) is used by the network
2713  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
2714  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2715  *  Return value:
2716  *  0 on success and a negative errno value (as defined in errno.h) on
2717  *  failure.
2718  */
2719 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2720 {
2721         struct stmmac_priv *priv = netdev_priv(dev);
2722
2723         if (netif_running(dev)) {
2724                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
2725                 return -EBUSY;
2726         }
2727
2728         dev->mtu = new_mtu;
2729
2730         netdev_update_features(dev);
2731
2732         return 0;
2733 }
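
/* Usage note (illustrative): since this callback returns -EBUSY while the
 * interface is running, the MTU must be changed with the link down, e.g.:
 *
 *	# ip link set dev eth0 down
 *	# ip link set dev eth0 mtu 4000
 *	# ip link set dev eth0 up
 */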
2734
2735 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2736                                              netdev_features_t features)
2737 {
2738         struct stmmac_priv *priv = netdev_priv(dev);
2739
2740         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2741                 features &= ~NETIF_F_RXCSUM;
2742
2743         if (!priv->plat->tx_coe)
2744                 features &= ~NETIF_F_CSUM_MASK;
2745
2746         /* Some GMAC devices have buggy Jumbo frame support that
2747          * needs to have the Tx COE disabled for oversized frames
2748          * (due to limited buffer sizes). In this case we disable
2749          * the TX csum insertion in the TDES and don't use SF.
2750          */
2751         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2752                 features &= ~NETIF_F_CSUM_MASK;
2753
2754         /* Enable/disable TSO as requested via ethtool (NETIF_F_TSO) */
2755         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2756                 if (features & NETIF_F_TSO)
2757                         priv->tso = true;
2758                 else
2759                         priv->tso = false;
2760         }
2761
2762         return features;
2763 }
2764
2765 static int stmmac_set_features(struct net_device *netdev,
2766                                netdev_features_t features)
2767 {
2768         struct stmmac_priv *priv = netdev_priv(netdev);
2769
2770         /* Keep the COE Type if RX checksum offload is supported */
2771         if (features & NETIF_F_RXCSUM)
2772                 priv->hw->rx_csum = priv->plat->rx_coe;
2773         else
2774                 priv->hw->rx_csum = 0;
2775         /* No check is needed because rx_coe has been validated before
2776          * and will be fixed up in case of an issue.
2777          */
2778         priv->hw->mac->rx_ipc(priv->hw);
2779
2780         return 0;
2781 }
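
/* Usage note (illustrative): these fixup/set hooks are driven from user
 * space via ethtool; e.g. "ethtool -K eth0 rx off" clears NETIF_F_RXCSUM,
 * which lands in stmmac_set_features() and reprograms the RX IPC engine.
 */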
2782
2783 /**
2784  *  stmmac_interrupt - main ISR
2785  *  @irq: interrupt number.
2786  *  @dev_id: to pass the net device pointer.
2787  *  Description: this is the main driver interrupt service routine.
2788  *  It can call:
2789  *  o DMA service routine (to manage incoming frame reception and transmission
2790  *    status)
2791  *  o Core interrupts to manage: remote wake-up, management counter, LPI
2792  *    interrupts.
2793  */
2794 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2795 {
2796         struct net_device *dev = (struct net_device *)dev_id;
2797         struct stmmac_priv *priv;
2798
2799         if (unlikely(!dev)) {
2800                 pr_err("%s: invalid dev pointer\n", __func__);
2801                 return IRQ_NONE;
2802         }
2803         priv = netdev_priv(dev);
2804         if (priv->irq_wake)
2805                 pm_wakeup_event(priv->device, 0);
2806
2807         /* To handle GMAC own interrupts */
2808         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2809                 int status = priv->hw->mac->host_irq_status(priv->hw,
2810                                                             &priv->xstats);
2811                 if (unlikely(status)) {
2812                         /* For LPI we need to save the tx status */
2813                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2814                                 priv->tx_path_in_lpi_mode = true;
2815                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2816                                 priv->tx_path_in_lpi_mode = false;
2817                         if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
2818                             priv->hw->dma->set_rx_tail_ptr)
2819                                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2820                                                 priv->rx_tail_addr, STMMAC_CHAN0);
2821                 }
2822
2823                 /* PCS link status */
2824                 if (priv->hw->pcs) {
2825                         if (priv->xstats.pcs_link)
2826                                 netif_carrier_on(dev);
2827                         else
2828                                 netif_carrier_off(dev);
2829                 }
2830         }
2831
2832         /* To handle DMA interrupts */
2833         stmmac_dma_interrupt(priv);
2834
2835         return IRQ_HANDLED;
2836 }
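
/* A minimal sketch (assuming the usual open() path, not shown here): the
 * handler above is installed with a shared IRQ request where dev_id is
 * the net_device pointer, matching the cast at the top of the ISR:
 *
 *	ret = request_irq(dev->irq, stmmac_interrupt,
 *			  IRQF_SHARED, dev->name, dev);
 *	if (unlikely(ret < 0))
 *		netdev_err(dev, "allocating the IRQ %d (error: %d)\n",
 *			   dev->irq, ret);
 */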
2837
2838 #ifdef CONFIG_NET_POLL_CONTROLLER
2839 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2840  * to allow network I/O with interrupts disabled.
2841  */
2842 static void stmmac_poll_controller(struct net_device *dev)
2843 {
2844         disable_irq(dev->irq);
2845         stmmac_interrupt(dev->irq, dev);
2846         enable_irq(dev->irq);
2847 }
2848 #endif
2849
2850 /**
2851  *  stmmac_ioctl - Entry point for the Ioctl
2852  *  @dev: Device pointer.
2853  *  @rq: An IOCTL specific structure, that can contain a pointer to
2854  *  a proprietary structure used to pass information to the driver.
2855  *  @cmd: IOCTL command
2856  *  Description:
2857  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2858  */
2859 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2860 {
2861         int ret = -EOPNOTSUPP;
2862
2863         if (!netif_running(dev))
2864                 return -EINVAL;
2865
2866         switch (cmd) {
2867         case SIOCGMIIPHY:
2868         case SIOCGMIIREG:
2869         case SIOCSMIIREG:
2870                 if (!dev->phydev)
2871                         return -EINVAL;
2872                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
2873                 break;
2874         case SIOCSHWTSTAMP:
2875                 ret = stmmac_hwtstamp_ioctl(dev, rq);
2876                 break;
2877         default:
2878                 break;
2879         }
2880
2881         return ret;
2882 }
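
/* A user-space sketch of the SIOCSHWTSTAMP path handled above, using only
 * the standard hwtstamp_config ABI from <linux/net_tstamp.h>; "eth0" is
 * just an example interface name:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */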
2883
2884 #ifdef CONFIG_DEBUG_FS
2885 static struct dentry *stmmac_fs_dir;
2886
2887 static void sysfs_display_ring(void *head, int size, int extend_desc,
2888                                struct seq_file *seq)
2889 {
2890         int i;
2891         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2892         struct dma_desc *p = (struct dma_desc *)head;
2893
2894         for (i = 0; i < size; i++) {
2895                 if (extend_desc) {
2896                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2897                                    i, (unsigned int)virt_to_phys(ep),
2898                                    le32_to_cpu(ep->basic.des0),
2899                                    le32_to_cpu(ep->basic.des1),
2900                                    le32_to_cpu(ep->basic.des2),
2901                                    le32_to_cpu(ep->basic.des3));
2902                         ep++;
2903                 } else {
2904                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2905                                    i, (unsigned int)virt_to_phys(p),
2906                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2907                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2908                         p++;
2909                 }
2910                 seq_printf(seq, "\n");
2911         }
2912 }
2913
2914 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2915 {
2916         struct net_device *dev = seq->private;
2917         struct stmmac_priv *priv = netdev_priv(dev);
2918
2919         if (priv->extend_desc) {
2920                 seq_printf(seq, "Extended RX descriptor ring:\n");
2921                 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
2922                 seq_printf(seq, "Extended TX descriptor ring:\n");
2923                 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2924         } else {
2925                 seq_printf(seq, "RX descriptor ring:\n");
2926                 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
2927                 seq_printf(seq, "TX descriptor ring:\n");
2928                 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2929         }
2930
2931         return 0;
2932 }
2933
2934 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2935 {
2936         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2937 }
2938
2939 /* Debugfs files; they should appear in /sys/kernel/debug/stmmaceth/eth0 */
2940
2941 static const struct file_operations stmmac_rings_status_fops = {
2942         .owner = THIS_MODULE,
2943         .open = stmmac_sysfs_ring_open,
2944         .read = seq_read,
2945         .llseek = seq_lseek,
2946         .release = single_release,
2947 };
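
/* Usage note (illustrative): once a device is registered, the ring dump
 * wired up above can be read directly, e.g.:
 *
 *	# cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 */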
2948
2949 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2950 {
2951         struct net_device *dev = seq->private;
2952         struct stmmac_priv *priv = netdev_priv(dev);
2953
2954         if (!priv->hw_cap_support) {
2955                 seq_printf(seq, "DMA HW features not supported\n");
2956                 return 0;
2957         }
2958
2959         seq_printf(seq, "==============================\n");
2960         seq_printf(seq, "\tDMA HW features\n");
2961         seq_printf(seq, "==============================\n");
2962
2963         seq_printf(seq, "\t10/100 Mbps: %s\n",
2964                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2965         seq_printf(seq, "\t1000 Mbps: %s\n",
2966                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
2967         seq_printf(seq, "\tHalf duplex: %s\n",
2968                    (priv->dma_cap.half_duplex) ? "Y" : "N");
2969         seq_printf(seq, "\tHash Filter: %s\n",
2970                    (priv->dma_cap.hash_filter) ? "Y" : "N");
2971         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2972                    (priv->dma_cap.multi_addr) ? "Y" : "N");
2973         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
2974                    (priv->dma_cap.pcs) ? "Y" : "N");
2975         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2976                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
2977         seq_printf(seq, "\tPMT Remote wake up: %s\n",
2978                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2979         seq_printf(seq, "\tPMT Magic Frame: %s\n",
2980                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2981         seq_printf(seq, "\tRMON module: %s\n",
2982                    (priv->dma_cap.rmon) ? "Y" : "N");
2983         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2984                    (priv->dma_cap.time_stamp) ? "Y" : "N");
2985         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
2986                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
2987         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
2988                    (priv->dma_cap.eee) ? "Y" : "N");
2989         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2990         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2991                    (priv->dma_cap.tx_coe) ? "Y" : "N");
2992         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2993                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
2994                            (priv->dma_cap.rx_coe) ? "Y" : "N");
2995         } else {
2996                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2997                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2998                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2999                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3000         }
3001         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3002                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3003         seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3004                    priv->dma_cap.number_rx_channel);
3005         seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3006                    priv->dma_cap.number_tx_channel);
3007         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3008                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3009
3010         return 0;
3011 }
3012
3013 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3014 {
3015         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3016 }
3017
3018 static const struct file_operations stmmac_dma_cap_fops = {
3019         .owner = THIS_MODULE,
3020         .open = stmmac_sysfs_dma_cap_open,
3021         .read = seq_read,
3022         .llseek = seq_lseek,
3023         .release = single_release,
3024 };
3025
3026 static int stmmac_init_fs(struct net_device *dev)
3027 {
3028         struct stmmac_priv *priv = netdev_priv(dev);
3029
3030         /* Create per netdev entries */
3031         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3032
3033         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3034                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3035
3036                 return -ENOMEM;
3037         }
3038
3039         /* Entry to report DMA RX/TX rings */
3040         priv->dbgfs_rings_status =
3041                 debugfs_create_file("descriptors_status", S_IRUGO,
3042                                     priv->dbgfs_dir, dev,
3043                                     &stmmac_rings_status_fops);
3044
3045         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3046                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3047                 debugfs_remove_recursive(priv->dbgfs_dir);
3048
3049                 return -ENOMEM;
3050         }
3051
3052         /* Entry to report the DMA HW features */
3053         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3054                                             priv->dbgfs_dir,
3055                                             dev, &stmmac_dma_cap_fops);
3056
3057         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3058                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3059                 debugfs_remove_recursive(priv->dbgfs_dir);
3060
3061                 return -ENOMEM;
3062         }
3063
3064         return 0;
3065 }
3066
3067 static void stmmac_exit_fs(struct net_device *dev)
3068 {
3069         struct stmmac_priv *priv = netdev_priv(dev);
3070
3071         debugfs_remove_recursive(priv->dbgfs_dir);
3072 }
3073 #endif /* CONFIG_DEBUG_FS */
3074
3075 static const struct net_device_ops stmmac_netdev_ops = {
3076         .ndo_open = stmmac_open,
3077         .ndo_start_xmit = stmmac_xmit,
3078         .ndo_stop = stmmac_release,
3079         .ndo_change_mtu = stmmac_change_mtu,
3080         .ndo_fix_features = stmmac_fix_features,
3081         .ndo_set_features = stmmac_set_features,
3082         .ndo_set_rx_mode = stmmac_set_rx_mode,
3083         .ndo_tx_timeout = stmmac_tx_timeout,
3084         .ndo_do_ioctl = stmmac_ioctl,
3085 #ifdef CONFIG_NET_POLL_CONTROLLER
3086         .ndo_poll_controller = stmmac_poll_controller,
3087 #endif
3088         .ndo_set_mac_address = eth_mac_addr,
3089 };
3090
3091 /**
3092  *  stmmac_hw_init - Init the MAC device
3093  *  @priv: driver private structure
3094  *  Description: this function is to configure the MAC device according to
3095  *  some platform parameters or the HW capability register. It prepares the
3096  *  driver to use either ring or chain modes and to setup either enhanced or
3097  *  normal descriptors.
3098  */
3099 static int stmmac_hw_init(struct stmmac_priv *priv)
3100 {
3101         struct mac_device_info *mac;
3102
3103         /* Identify the MAC HW device */
3104         if (priv->plat->has_gmac) {
3105                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3106                 mac = dwmac1000_setup(priv->ioaddr,
3107                                       priv->plat->multicast_filter_bins,
3108                                       priv->plat->unicast_filter_entries,
3109                                       &priv->synopsys_id);
3110         } else if (priv->plat->has_gmac4) {
3111                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3112                 mac = dwmac4_setup(priv->ioaddr,
3113                                    priv->plat->multicast_filter_bins,
3114                                    priv->plat->unicast_filter_entries,
3115                                    &priv->synopsys_id);
3116         } else {
3117                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3118         }
3119         if (!mac)
3120                 return -ENOMEM;
3121
3122         priv->hw = mac;
3123
3124         /* To use the chained or ring mode */
3125         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3126                 priv->hw->mode = &dwmac4_ring_mode_ops;
3127         } else {
3128                 if (chain_mode) {
3129                         priv->hw->mode = &chain_mode_ops;
3130                         dev_info(priv->device, "Chain mode enabled\n");
3131                         priv->mode = STMMAC_CHAIN_MODE;
3132                 } else {
3133                         priv->hw->mode = &ring_mode_ops;
3134                         dev_info(priv->device, "Ring mode enabled\n");
3135                         priv->mode = STMMAC_RING_MODE;
3136                 }
3137         }
3138
3139         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
3140         priv->hw_cap_support = stmmac_get_hw_features(priv);
3141         if (priv->hw_cap_support) {
3142                 dev_info(priv->device, "DMA HW capability register supported\n");
3143
3144                 /* We can override some gmac/dma configuration fields
3145                  * (e.g. enh_desc, tx_coe) that are passed through the
3146                  * platform data with the values from the HW capability
3147                  * register (if supported).
3148                  */
3149                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3150                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3151                 priv->hw->pmt = priv->plat->pmt;
3152
3153                 /* TXCOE doesn't work in thresh DMA mode */
3154                 if (priv->plat->force_thresh_dma_mode)
3155                         priv->plat->tx_coe = 0;
3156                 else
3157                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3158
3159                 /* In case of GMAC4 rx_coe is from HW cap register. */
3160                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3161
3162                 if (priv->dma_cap.rx_coe_type2)
3163                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3164                 else if (priv->dma_cap.rx_coe_type1)
3165                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3166
3167         } else {
3168                 dev_info(priv->device, "No HW DMA feature register supported\n");
3169         }
3170
3171         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3172         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3173                 priv->hw->desc = &dwmac4_desc_ops;
3174         else
3175                 stmmac_selec_desc_mode(priv);
3176
3177         if (priv->plat->rx_coe) {
3178                 priv->hw->rx_csum = priv->plat->rx_coe;
3179                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3180                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3181                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3182         }
3183         if (priv->plat->tx_coe)
3184                 dev_info(priv->device, "TX Checksum insertion supported\n");
3185
3186         if (priv->plat->pmt) {
3187                 dev_info(priv->device, "Wake-Up On LAN supported\n");
3188                 device_set_wakeup_capable(priv->device, 1);
3189         }
3190
3191         if (priv->dma_cap.tsoen)
3192                 dev_info(priv->device, "TSO supported\n");
3193
3194         return 0;
3195 }
3196
3197 /**
3198  * stmmac_dvr_probe
3199  * @device: device pointer
3200  * @plat_dat: platform data pointer
3201  * @res: stmmac resource pointer
3202  * Description: this is the main probe function used to
3203  * call alloc_etherdev and allocate the private structure.
3204  * Return:
3205  * returns 0 on success, otherwise errno.
3206  */
3207 int stmmac_dvr_probe(struct device *device,
3208                      struct plat_stmmacenet_data *plat_dat,
3209                      struct stmmac_resources *res)
3210 {
3211         int ret = 0;
3212         struct net_device *ndev = NULL;
3213         struct stmmac_priv *priv;
3214
3215         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3216         if (!ndev)
3217                 return -ENOMEM;
3218
3219         SET_NETDEV_DEV(ndev, device);
3220
3221         priv = netdev_priv(ndev);
3222         priv->device = device;
3223         priv->dev = ndev;
3224
3225         stmmac_set_ethtool_ops(ndev);
3226         priv->pause = pause;
3227         priv->plat = plat_dat;
3228         priv->ioaddr = res->addr;
3229         priv->dev->base_addr = (unsigned long)res->addr;
3230
3231         priv->dev->irq = res->irq;
3232         priv->wol_irq = res->wol_irq;
3233         priv->lpi_irq = res->lpi_irq;
3234
3235         if (res->mac)
3236                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3237
3238         dev_set_drvdata(device, priv->dev);
3239
3240         /* Verify driver arguments */
3241         stmmac_verify_args();
3242
3243         /* Override with kernel parameters if supplied XXX CRS XXX
3244          * this needs to have multiple instances
3245          */
3246         if ((phyaddr >= 0) && (phyaddr <= 31))
3247                 priv->plat->phy_addr = phyaddr;
3248
3249         if (priv->plat->stmmac_rst)
3250                 reset_control_deassert(priv->plat->stmmac_rst);
3251
3252         /* Init MAC and get the capabilities */
3253         ret = stmmac_hw_init(priv);
3254         if (ret)
3255                 goto error_hw_init;
3256
3257         ndev->netdev_ops = &stmmac_netdev_ops;
3258
3259         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3260                             NETIF_F_RXCSUM;
3261
3262         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3263                 ndev->hw_features |= NETIF_F_TSO;
3264                 priv->tso = true;
3265                 dev_info(priv->device, "TSO feature enabled\n");
3266         }
3267         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3268         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3269 #ifdef STMMAC_VLAN_TAG_USED
3270         /* Both mac100 and gmac support receive VLAN tag detection */
3271         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3272 #endif
3273         priv->msg_enable = netif_msg_init(debug, default_msg_level);
3274
3275         /* MTU range: 46 - hw-specific max */
3276         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3277         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3278                 ndev->max_mtu = JUMBO_LEN;
3279         else
3280                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3281         /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
3282          * ndev->max_mtu or lower than ndev->min_mtu (an invalid range).
3283          */
3284         if ((priv->plat->maxmtu < ndev->max_mtu) &&
3285             (priv->plat->maxmtu >= ndev->min_mtu))
3286                 ndev->max_mtu = priv->plat->maxmtu;
3287         else if (priv->plat->maxmtu < ndev->min_mtu)
3288                 dev_warn(priv->device,
3289                          "%s: warning: maxmtu has an invalid value (%d)\n",
3290                          __func__, priv->plat->maxmtu);
3291
3292         if (flow_ctrl)
3293                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
3294
3295         /* Rx Watchdog is available in cores newer than 3.40.
3296          * In some cases, for example on buggy HW, this feature
3297          * has to be disabled; this can be done by setting the
3298          * riwt_off field in the platform data.
3299          */
3300         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3301                 priv->use_riwt = 1;
3302                 dev_info(priv->device,
3303                          "Enable RX Mitigation via HW Watchdog Timer\n");
3304         }
3305
3306         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3307
3308         spin_lock_init(&priv->lock);
3309
3310         /* If a specific clk_csr value is passed from the platform
3311          * this means that the CSR Clock Range selection cannot be
3312          * changed at run-time and is fixed. Otherwise the driver
3313          * will try to set the MDC clock dynamically according to
3314          * the actual CSR input clock.
3315          */
3316         if (!priv->plat->clk_csr)
3317                 stmmac_clk_csr_set(priv);
3318         else
3319                 priv->clk_csr = priv->plat->clk_csr;
3320
3321         stmmac_check_pcs_mode(priv);
3322
3323         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
3324             priv->hw->pcs != STMMAC_PCS_TBI &&
3325             priv->hw->pcs != STMMAC_PCS_RTBI) {
3326                 /* MDIO bus Registration */
3327                 ret = stmmac_mdio_register(ndev);
3328                 if (ret < 0) {
3329                         dev_err(priv->device,
3330                                 "%s: MDIO bus (id: %d) registration failed\n",
3331                                 __func__, priv->plat->bus_id);
3332                         goto error_mdio_register;
3333                 }
3334         }
3335
3336         ret = register_netdev(ndev);
3337         if (ret) {
3338                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3339                         __func__, ret);
3340                 goto error_netdev_register;
3341         }
3342
3343         return ret;
3344
3345 error_netdev_register:
3346         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3347             priv->hw->pcs != STMMAC_PCS_TBI &&
3348             priv->hw->pcs != STMMAC_PCS_RTBI)
3349                 stmmac_mdio_unregister(ndev);
3350 error_mdio_register:
3351         netif_napi_del(&priv->napi);
3352 error_hw_init:
3353         free_netdev(ndev);
3354
3355         return ret;
3356 }
3357 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
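
/* A minimal sketch of a platform glue driver feeding this probe, modelled
 * on the dwmac-* glue layers; foo_dwmac_probe() and "foo-dwmac" are
 * hypothetical names, and the stmmac_platform helpers are assumed to be
 * available to the glue code:
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		// SoC-specific clock/PHY glue would go here
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 */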
3358
3359 /**
3360  * stmmac_dvr_remove
3361  * @dev: device pointer
3362  * Description: this function resets the TX/RX processes, disables the MAC
3363  * RX/TX, changes the link status and releases the DMA descriptor rings.
3364  */
3365 int stmmac_dvr_remove(struct device *dev)
3366 {
3367         struct net_device *ndev = dev_get_drvdata(dev);
3368         struct stmmac_priv *priv = netdev_priv(ndev);
3369
3370         netdev_info(priv->dev, "%s: removing driver\n", __func__);
3371
3372         priv->hw->dma->stop_rx(priv->ioaddr);
3373         priv->hw->dma->stop_tx(priv->ioaddr);
3374
3375         stmmac_set_mac(priv->ioaddr, false);
3376         netif_carrier_off(ndev);
3377         unregister_netdev(ndev);
3378         if (priv->plat->stmmac_rst)
3379                 reset_control_assert(priv->plat->stmmac_rst);
3380         clk_disable_unprepare(priv->plat->pclk);
3381         clk_disable_unprepare(priv->plat->stmmac_clk);
3382         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3383             priv->hw->pcs != STMMAC_PCS_TBI &&
3384             priv->hw->pcs != STMMAC_PCS_RTBI)
3385                 stmmac_mdio_unregister(ndev);
3386         free_netdev(ndev);
3387
3388         return 0;
3389 }
3390 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3391
3392 /**
3393  * stmmac_suspend - suspend callback
3394  * @dev: device pointer
3395  * Description: this is the function to suspend the device and it is called
3396  * by the platform driver to stop the network queue, release the resources,
3397  * program the PMT register (for WoL), clean and release driver resources.
3398  */
3399 int stmmac_suspend(struct device *dev)
3400 {
3401         struct net_device *ndev = dev_get_drvdata(dev);
3402         struct stmmac_priv *priv = netdev_priv(ndev);
3403         unsigned long flags;
3404
3405         if (!ndev || !netif_running(ndev))
3406                 return 0;
3407
3408         if (ndev->phydev)
3409                 phy_stop(ndev->phydev);
3410
3411         spin_lock_irqsave(&priv->lock, flags);
3412
3413         netif_device_detach(ndev);
3414         netif_stop_queue(ndev);
3415
3416         napi_disable(&priv->napi);
3417
3418         /* Stop TX/RX DMA */
3419         priv->hw->dma->stop_tx(priv->ioaddr);
3420         priv->hw->dma->stop_rx(priv->ioaddr);
3421
3422         /* Enable Power down mode by programming the PMT regs */
3423         if (device_may_wakeup(priv->device)) {
3424                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3425                 priv->irq_wake = 1;
3426         } else {
3427                 stmmac_set_mac(priv->ioaddr, false);
3428                 pinctrl_pm_select_sleep_state(priv->device);
3429                 /* Disable the clocks since wake-up (PMT) is not in use */
3430                 clk_disable(priv->plat->pclk);
3431                 clk_disable(priv->plat->stmmac_clk);
3432         }
3433         spin_unlock_irqrestore(&priv->lock, flags);
3434
3435         priv->oldlink = 0;
3436         priv->speed = SPEED_UNKNOWN;
3437         priv->oldduplex = DUPLEX_UNKNOWN;
3438         return 0;
3439 }
3440 EXPORT_SYMBOL_GPL(stmmac_suspend);
3441
3442 /**
3443  * stmmac_resume - resume callback
3444  * @dev: device pointer
3445  * Description: on resume, this function is invoked to set up the DMA and
3446  * CORE in a usable state.
3447  */
3448 int stmmac_resume(struct device *dev)
3449 {
3450         struct net_device *ndev = dev_get_drvdata(dev);
3451         struct stmmac_priv *priv = netdev_priv(ndev);
3452         unsigned long flags;
3453
3454         if (!netif_running(ndev))
3455                 return 0;
3456
3457         /* The Power Down bit in the PMT register is cleared
3458          * automatically as soon as a magic packet or a Wake-up frame
3459          * is received. Nevertheless, it's better to clear this bit
3460          * manually because it can cause problems when resuming from
3461          * another device (e.g. a serial console).
3462          */
3463         if (device_may_wakeup(priv->device)) {
3464                 spin_lock_irqsave(&priv->lock, flags);
3465                 priv->hw->mac->pmt(priv->hw, 0);
3466                 spin_unlock_irqrestore(&priv->lock, flags);
3467                 priv->irq_wake = 0;
3468         } else {
3469                 pinctrl_pm_select_default_state(priv->device);
3470                 /* enable the clocks previously disabled */
3471                 clk_enable(priv->plat->stmmac_clk);
3472                 clk_enable(priv->plat->pclk);
3473                 /* reset the phy so that it's ready */
3474                 if (priv->mii)
3475                         stmmac_mdio_reset(priv->mii);
3476         }
3477
3478         netif_device_attach(ndev);
3479
3480         spin_lock_irqsave(&priv->lock, flags);
3481
3482         priv->cur_rx = 0;
3483         priv->dirty_rx = 0;
3484         priv->dirty_tx = 0;
3485         priv->cur_tx = 0;
3486         /* Reset the private MSS value to force MSS context settings at
3487          * the next TSO xmit (only used for GMAC4).
3488          */
3489         priv->mss = 0;
3490
3491         stmmac_clear_descriptors(priv);
3492
3493         stmmac_hw_setup(ndev, false);
3494         stmmac_init_tx_coalesce(priv);
3495         stmmac_set_rx_mode(ndev);
3496
3497         napi_enable(&priv->napi);
3498
3499         netif_start_queue(ndev);
3500
3501         spin_unlock_irqrestore(&priv->lock, flags);
3502
3503         if (ndev->phydev)
3504                 phy_start(ndev->phydev);
3505
3506         return 0;
3507 }
3508 EXPORT_SYMBOL_GPL(stmmac_resume);
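
/* Illustrative sketch: glue drivers typically expose the two callbacks
 * above through a dev_pm_ops instance; foo_dwmac_driver, foo_dwmac_probe
 * and "foo-dwmac" are hypothetical:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, stmmac_suspend, stmmac_resume);
 *
 *	static struct platform_driver foo_dwmac_driver = {
 *		.probe	= foo_dwmac_probe,
 *		.driver	= {
 *			.name	= "foo-dwmac",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 */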
3509
3510 #ifndef MODULE
3511 static int __init stmmac_cmdline_opt(char *str)
3512 {
3513         char *opt;
3514
3515         if (!str || !*str)
3516                 return -EINVAL;
3517         while ((opt = strsep(&str, ",")) != NULL) {
3518                 if (!strncmp(opt, "debug:", 6)) {
3519                         if (kstrtoint(opt + 6, 0, &debug))
3520                                 goto err;
3521                 } else if (!strncmp(opt, "phyaddr:", 8)) {
3522                         if (kstrtoint(opt + 8, 0, &phyaddr))
3523                                 goto err;
3524                 } else if (!strncmp(opt, "buf_sz:", 7)) {
3525                         if (kstrtoint(opt + 7, 0, &buf_sz))
3526                                 goto err;
3527                 } else if (!strncmp(opt, "tc:", 3)) {
3528                         if (kstrtoint(opt + 3, 0, &tc))
3529                                 goto err;
3530                 } else if (!strncmp(opt, "watchdog:", 9)) {
3531                         if (kstrtoint(opt + 9, 0, &watchdog))
3532                                 goto err;
3533                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3534                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3535                                 goto err;
3536                 } else if (!strncmp(opt, "pause:", 6)) {
3537                         if (kstrtoint(opt + 6, 0, &pause))
3538                                 goto err;
3539                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3540                         if (kstrtoint(opt + 10, 0, &eee_timer))
3541                                 goto err;
3542                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3543                         if (kstrtoint(opt + 11, 0, &chain_mode))
3544                                 goto err;
3545                 }
3546         }
3547         return 0;
3548
3549 err:
3550         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3551         return -EINVAL;
3552 }
3553
3554 __setup("stmmaceth=", stmmac_cmdline_opt);
3555 #endif /* MODULE */
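
/* Usage note (illustrative): when the driver is built in, the parser above
 * accepts a comma-separated option string on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */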
3556
3557 static int __init stmmac_init(void)
3558 {
3559 #ifdef CONFIG_DEBUG_FS
3560         /* Create debugfs main directory if it doesn't exist yet */
3561         if (!stmmac_fs_dir) {
3562                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3563
3564                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3565                         pr_err("ERROR %s, debugfs create directory failed\n",
3566                                STMMAC_RESOURCE_NAME);
3567
3568                         return -ENOMEM;
3569                 }
3570         }
3571 #endif
3572
3573         return 0;
3574 }
3575
3576 static void __exit stmmac_exit(void)
3577 {
3578 #ifdef CONFIG_DEBUG_FS
3579         debugfs_remove_recursive(stmmac_fs_dir);
3580 #endif
3581 }
3582
3583 module_init(stmmac_init)
3584 module_exit(stmmac_exit)
3585
3586 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3587 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3588 MODULE_LICENSE("GPL");