/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
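/* For illustration: with the default eee_timer of 1000 ms,
 * STMMAC_LPI_T(eee_timer) yields a deadline one second from "now",
 * i.e. the LPI timer re-arms one second after the last TX activity.
 */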

/* By default the driver will use the ring mode to manage tx and rx
 * descriptors, but the user can force the use of chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Otherwise the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* The platform-provided default clk_csr is assumed valid in all
         * cases except the ones handled below. For rates higher than the
         * IEEE 802.3 specified frequency we cannot estimate the proper
         * divider because the frequency of clk_csr_i is not known, so the
         * default divider is left unchanged.
         */
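        /* For example (illustration only), a 125 MHz csr clock falls in
         * the 100-150 MHz range below and selects STMMAC_CSR_100_150M.
         */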
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
}

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
        u32 avail;

        if (priv->dirty_tx > priv->cur_tx)
                avail = priv->dirty_tx - priv->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

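        /* Illustration (assumed values): with DMA_TX_SIZE = 512,
         * cur_tx = 10 and dirty_tx = 5, five descriptors are in flight
         * and avail = 512 - 10 + 5 - 1 = 506 free slots; one slot is
         * always kept unused to tell a full ring apart from an empty one.
         */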
        return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
        u32 dirty;

        if (priv->dirty_rx <= priv->cur_rx)
                dirty = priv->cur_rx - priv->dirty_rx;
        else
                dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

        return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies the conditions and enters LPI mode
 * when EEE is in use.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        /* Check and enter in LPI mode */
        if ((priv->dirty_tx == priv->cur_tx) &&
            (priv->tx_path_in_lpi_mode == false))
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits LPI mode and disables EEE when the LPI
 * state is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  the MAC transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        unsigned long flags;
        bool ret = false;

        /* When using a PCS we cannot access the PHY registers at this
         * stage, so extra features such as EEE are not supported.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* EEE may stop being supported at run-time (for
                         * example because the link partner capabilities
                         * have changed). In that case the driver disables
                         * its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        setup_timer(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer,
                                    (unsigned long)priv);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (!priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;

        if (!priv->hwts_rx_en)
                return;

        /* Check if timestamp is available */
        if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
                /* For GMAC4, the valid timestamp is from CTX next desc. */
                if (priv->plat->has_gmac4)
                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
                else
                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_err(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to a
 *  proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* don't time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.AS1, any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.AS1, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.AS1, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
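                /* Illustration: for HWTSTAMP_FILTER_PTP_V2_EVENT the switch
                 * above selected TSVER2ENA, SNAPTYPSEL_1 and all three
                 * transport enables, so they are ORed into the control value
                 * here together with the always-on TSENA/TSCFUPDT/TSCTRLSSR
                 * bits.
                 */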
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);

                /* calculate the default addend:
                 * addend = 2^32 / freq_div_ratio,
                 * where freq_div_ratio = 1e9 ns / sec_inc
                 */
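                /* Worked example (assumed values): with clk_ptp_rate =
                 * 100 MHz and sec_inc = 20 ns, freq_div_ratio = 5e7 and
                 * addend = 5e7 * 2^32 / 1e8 = 2^31, so the 32-bit
                 * accumulator wraps every second clock cycle (i.e. every
                 * 20 ns) and advances the sub-second register by sec_inc.
                 */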
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2 by
 * looking at the HW capability register. It also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization, because the link may come up on
 * a different (EEE-capable) network.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int new_state = 0;
        unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
                                                 fc, pause_time);

                if (phydev->speed != priv->speed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4)
                                        ctrl &= ~priv->hw->link.port;
                                break;
                        case 100:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl |= priv->hw->link.speed;
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        case 10:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl &= ~(priv->hw->link.speed);
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the link-adjust hook
                 * when a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this verifies whether the HW supports the Physical Coding
 * Sublayer (PCS), an interface that can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = 0;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop advertising 1000BASE capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);

        return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        void *head_rx, *head_tx;

        if (priv->extend_desc) {
                head_rx = (void *)priv->dma_erx;
                head_tx = (void *)priv->dma_etx;
        } else {
                head_rx = (void *)priv->dma_rx;
                head_tx = (void *)priv->dma_tx;
        }

        /* Display Rx ring */
        priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        /* Display Tx ring */
        priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

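        /* For example (illustration only), an MTU of 3000 bytes falls in
         * the [BUF_SIZE_2KiB, BUF_SIZE_4KiB) range and selects a 4 KiB
         * DMA buffer.
         */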
        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        int i;

        /* Clear the Rx/Tx descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags)
{
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
        else
                p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
        if (priv->rx_skbuff[i]) {
                dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_skbuff[i]);
        }
        priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int bfsize = 0;
        int ret = -ENOMEM;

        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        netif_dbg(priv, probe, priv->dev,
                  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
                  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (i = 0; i < DMA_RX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_erx + i)->basic);
                else
                        p = priv->dma_rx + i;

                ret = stmmac_init_rx_buffers(priv, p, i, flags);
                if (ret)
                        goto err_init_rx_buffers;

                netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
                          priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
                          (unsigned int)priv->rx_skbuff_dma[i]);
        }
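        /* All DMA_RX_SIZE buffers are now owned by the DMA engine; i equals
         * DMA_RX_SIZE here, so dirty_rx below starts at zero together with
         * cur_rx.
         */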
        priv->cur_rx = 0;
        priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
        buf_sz = bfsize;

        /* Setup the chained descriptor addresses */
        if (priv->mode == STMMAC_CHAIN_MODE) {
                if (priv->extend_desc) {
                        priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 1);
                        priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 1);
                } else {
                        priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 0);
                        priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 0);
                }
        }

        /* TX INITIALIZATION */
        for (i = 0; i < DMA_TX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_etx + i)->basic);
                else
                        p = priv->dma_tx + i;

                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
                        p->des0 = 0;
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
                } else {
                        p->des2 = 0;
                }

                priv->tx_skbuff_dma[i].buf = 0;
                priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff_dma[i].len = 0;
                priv->tx_skbuff_dma[i].last_segment = false;
                priv->tx_skbuff[i] = NULL;
        }

        priv->dirty_tx = 0;
        priv->cur_tx = 0;
        netdev_reset_queue(priv->dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return 0;

err_init_rx_buffers:
        while (--i >= 0)
                stmmac_free_rx_buffers(priv, i);
        return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
                stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_TX_SIZE; i++) {
                if (priv->tx_skbuff_dma[i].buf) {
                        if (priv->tx_skbuff_dma[i].map_as_page)
                                dma_unmap_page(priv->device,
                                               priv->tx_skbuff_dma[i].buf,
                                               priv->tx_skbuff_dma[i].len,
                                               DMA_TO_DEVICE);
                        else
                                dma_unmap_single(priv->device,
                                                 priv->tx_skbuff_dma[i].buf,
                                                 priv->tx_skbuff_dma[i].len,
                                                 DMA_TO_DEVICE);
                }

                if (priv->tx_skbuff[i]) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                        priv->tx_skbuff_dma[i].buf = 0;
                        priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: depending on which descriptor type is in use (extended or
 * basic) this function allocates the resources for the TX and RX paths.
 * For reception, for example, it pre-allocates the RX socket buffers in
 * order to allow a zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
        int ret = -ENOMEM;

        priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
                                            GFP_KERNEL);
        if (!priv->rx_skbuff_dma)
                return -ENOMEM;

        priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;

        priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
                                            sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;

        priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skbuff;

        if (priv->extend_desc) {
                priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                    sizeof(struct dma_extended_desc),
                                                    &priv->dma_rx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_erx)
                        goto err_dma;

                priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                    sizeof(struct dma_extended_desc),
                                                    &priv->dma_tx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_etx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_extended_desc),
                                          priv->dma_erx, priv->dma_rx_phy);
                        goto err_dma;
                }
        } else {
                priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_rx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_rx)
                        goto err_dma;

                priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_tx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_tx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_desc),
                                          priv->dma_rx, priv->dma_rx_phy);
                        goto err_dma;
                }
        }

        return 0;

err_dma:
        kfree(priv->tx_skbuff);
err_tx_skbuff:
        kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
        kfree(priv->rx_skbuff);
err_rx_skbuff:
        kfree(priv->rx_skbuff_dma);
        return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
        /* Release the DMA TX/RX socket buffers */
        dma_free_rx_skbufs(priv);
        dma_free_tx_skbufs(priv);

        /* Free DMA regions of consistent memory previously allocated */
        if (!priv->extend_desc) {
                dma_free_coherent(priv->device,
                                  DMA_TX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_tx, priv->dma_tx_phy);
                dma_free_coherent(priv->device,
                                  DMA_RX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_rx, priv->dma_rx_phy);
        } else {
                dma_free_coherent(priv->device, DMA_TX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_etx, priv->dma_tx_phy);
                dma_free_coherent(priv->device, DMA_RX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_erx, priv->dma_rx_phy);
        }
        kfree(priv->rx_skbuff_dma);
        kfree(priv->rx_skbuff);
        kfree(priv->tx_skbuff_dma);
        kfree(priv->tx_skbuff);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: it enables the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_count = priv->plat->rx_queues_to_use;
        int queue;
        u8 mode;

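        /* Each queue is programmed as either DCB or AVB according to the
         * per-queue mode_to_use provided by the platform configuration.
         */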
        for (queue = 0; queue < rx_queues_count; queue++) {
                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
                priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
        }
}


/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
        int rxfifosz = priv->plat->rx_fifo_size;

        if (rxfifosz == 0)
                rxfifosz = priv->dma_cap.rx_fifo_size;

        if (priv->plat->force_thresh_dma_mode)
                priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
        else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
                /*
                 * In case of GMAC, SF mode can be enabled
                 * to perform the TX COE in HW. This depends on:
                 * 1) TX COE being actually supported;
                 * 2) there being no buggy Jumbo frame support
                 *    that requires not inserting the csum in the TDES.
                 */
                priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
                                        rxfifosz);
                priv->xstats.threshold = SF_DMA_MODE;
        } else
                priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
                                        rxfifosz);
}
1299
1300 /**
1301  * stmmac_tx_clean - to manage the transmission completion
1302  * @priv: driver private structure
1303  * Description: it reclaims the transmit resources after transmission completes.
1304  */
1305 static void stmmac_tx_clean(struct stmmac_priv *priv)
1306 {
1307         unsigned int bytes_compl = 0, pkts_compl = 0;
1308         unsigned int entry = priv->dirty_tx;
1309
1310         netif_tx_lock(priv->dev);
1311
1312         priv->xstats.tx_clean++;
1313
1314         while (entry != priv->cur_tx) {
1315                 struct sk_buff *skb = priv->tx_skbuff[entry];
1316                 struct dma_desc *p;
1317                 int status;
1318
1319                 if (priv->extend_desc)
1320                         p = (struct dma_desc *)(priv->dma_etx + entry);
1321                 else
1322                         p = priv->dma_tx + entry;
1323
1324                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1325                                                       &priv->xstats, p,
1326                                                       priv->ioaddr);
1327                 /* Check if the descriptor is owned by the DMA */
1328                 if (unlikely(status & tx_dma_own))
1329                         break;
1330
1331                 /* Just consider the last segment and ... */
1332                 if (likely(!(status & tx_not_ls))) {
1333                         /* ... verify the status error condition */
1334                         if (unlikely(status & tx_err)) {
1335                                 priv->dev->stats.tx_errors++;
1336                         } else {
1337                                 priv->dev->stats.tx_packets++;
1338                                 priv->xstats.tx_pkt_n++;
1339                         }
1340                         stmmac_get_tx_hwtstamp(priv, p, skb);
1341                 }
1342
1343                 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1344                         if (priv->tx_skbuff_dma[entry].map_as_page)
1345                                 dma_unmap_page(priv->device,
1346                                                priv->tx_skbuff_dma[entry].buf,
1347                                                priv->tx_skbuff_dma[entry].len,
1348                                                DMA_TO_DEVICE);
1349                         else
1350                                 dma_unmap_single(priv->device,
1351                                                  priv->tx_skbuff_dma[entry].buf,
1352                                                  priv->tx_skbuff_dma[entry].len,
1353                                                  DMA_TO_DEVICE);
1354                         priv->tx_skbuff_dma[entry].buf = 0;
1355                         priv->tx_skbuff_dma[entry].len = 0;
1356                         priv->tx_skbuff_dma[entry].map_as_page = false;
1357                 }
1358
1359                 if (priv->hw->mode->clean_desc3)
1360                         priv->hw->mode->clean_desc3(priv, p);
1361
1362                 priv->tx_skbuff_dma[entry].last_segment = false;
1363                 priv->tx_skbuff_dma[entry].is_jumbo = false;
1364
1365                 if (likely(skb != NULL)) {
1366                         pkts_compl++;
1367                         bytes_compl += skb->len;
1368                         dev_consume_skb_any(skb);
1369                         priv->tx_skbuff[entry] = NULL;
1370                 }
1371
1372                 priv->hw->desc->release_tx_desc(p, priv->mode);
1373
1374                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1375         }
1376         priv->dirty_tx = entry;
1377
1378         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1379
1380         if (unlikely(netif_queue_stopped(priv->dev) &&
1381             stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1382                 netif_dbg(priv, tx_done, priv->dev,
1383                           "%s: restart transmit\n", __func__);
1384                 netif_wake_queue(priv->dev);
1385         }
1386
1387         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1388                 stmmac_enable_eee_mode(priv);
1389                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1390         }
1391         netif_tx_unlock(priv->dev);
1392 }
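
/*
 * The reclaim loop above walks from dirty_tx up to (but excluding)
 * cur_tx, wrapping modulo the ring size: e.g. with a 512-entry ring,
 * dirty_tx = 510 and cur_tx = 2, entries 510, 511, 0 and 1 are
 * cleaned, provided the DMA has released ownership of each one.
 */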
1393
1394 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1395 {
1396         priv->hw->dma->enable_dma_irq(priv->ioaddr);
1397 }
1398
1399 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1400 {
1401         priv->hw->dma->disable_dma_irq(priv->ioaddr);
1402 }
1403
1404 /**
1405  * stmmac_tx_err - to manage the tx error
1406  * @priv: driver private structure
1407  * Description: it cleans the descriptors and restarts the transmission
1408  * in case of transmission errors.
1409  */
1410 static void stmmac_tx_err(struct stmmac_priv *priv)
1411 {
1412         int i;
1413         netif_stop_queue(priv->dev);
1414
1415         priv->hw->dma->stop_tx(priv->ioaddr);
1416         dma_free_tx_skbufs(priv);
1417         for (i = 0; i < DMA_TX_SIZE; i++)
1418                 if (priv->extend_desc)
1419                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1420                                                      priv->mode,
1421                                                      (i == DMA_TX_SIZE - 1));
1422                 else
1423                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1424                                                      priv->mode,
1425                                                      (i == DMA_TX_SIZE - 1));
1426         priv->dirty_tx = 0;
1427         priv->cur_tx = 0;
1428         netdev_reset_queue(priv->dev);
1429         priv->hw->dma->start_tx(priv->ioaddr);
1430
1431         priv->dev->stats.tx_errors++;
1432         netif_wake_queue(priv->dev);
1433 }
1434
1435 /**
1436  * stmmac_dma_interrupt - DMA ISR
1437  * @priv: driver private structure
1438  * Description: this is the DMA ISR. It is called by the main ISR.
1439  * It calls the dwmac dma routine and schedules the poll method when
1440  * there is work to be done.
1441  */
1442 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1443 {
1444         int status;
1445         int rxfifosz = priv->plat->rx_fifo_size;
1446
1447         status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1448         if (likely((status & handle_rx)) || (status & handle_tx)) {
1449                 if (likely(napi_schedule_prep(&priv->napi))) {
1450                         stmmac_disable_dma_irq(priv);
1451                         __napi_schedule(&priv->napi);
1452                 }
1453         }
1454         if (unlikely(status & tx_hard_error_bump_tc)) {
1455                 /* Try to bump up the dma threshold on this failure */
1456                 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1457                     (tc <= 256)) {
1458                         tc += 64;
1459                         if (priv->plat->force_thresh_dma_mode)
1460                                 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1461                                                         rxfifosz);
1462                         else
1463                                 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1464                                                         SF_DMA_MODE, rxfifosz);
1465                         priv->xstats.threshold = tc;
1466                 }
1467         } else if (unlikely(status == tx_hard_error))
1468                 stmmac_tx_err(priv);
1469 }
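
/*
 * The bump logic above starts from the module default tc = 64 and, on
 * each tx_hard_error_bump_tc event, grows the threshold by 64 while
 * tc <= 256: 64 -> 128 -> 192 -> 256 -> 320, after which (or when
 * already running in SF mode) no further bump is attempted.
 */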
1470
1471 /**
1472  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
1473  * @priv: driver private structure
1474  * Description: this masks the MMC irq since the counters are managed in SW.
1475  */
1476 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1477 {
1478         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1479                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1480
1481         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1482                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1483                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1484         } else {
1485                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1486                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1487         }
1488
1489         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1490
1491         if (priv->dma_cap.rmon) {
1492                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1493                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1494         } else
1495                 netdev_info(priv->dev, "No MAC Management Counters available\n");
1496 }
1497
1498 /**
1499  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
1500  * @priv: driver private structure
1501  * Description: select the Enhanced/Alternate or Normal descriptors.
1502  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1503  * supported by the HW capability register.
1504  */
1505 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1506 {
1507         if (priv->plat->enh_desc) {
1508                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1509
1510                 /* GMAC older than 3.50 has no extended descriptors */
1511                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1512                         dev_info(priv->device, "Enabled extended descriptors\n");
1513                         priv->extend_desc = 1;
1514                 } else
1515                         dev_warn(priv->device, "Extended descriptors not supported\n");
1516
1517                 priv->hw->desc = &enh_desc_ops;
1518         } else {
1519                 dev_info(priv->device, "Normal descriptors\n");
1520                 priv->hw->desc = &ndesc_ops;
1521         }
1522 }
1523
1524 /**
1525  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1526  * @priv: driver private structure
1527  * Description:
1528  *  newer GMAC chip generations have a register that indicates the
1529  *  presence of optional features/functions.
1530  *  It can also be used to override the values passed through the
1531  *  platform, which are necessary for the old MAC10/100 and GMAC chips.
1532  */
1533 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1534 {
1535         u32 ret = 0;
1536
1537         if (priv->hw->dma->get_hw_feature) {
1538                 priv->hw->dma->get_hw_feature(priv->ioaddr,
1539                                               &priv->dma_cap);
1540                 ret = 1;
1541         }
1542
1543         return ret;
1544 }
1545
1546 /**
1547  * stmmac_check_ether_addr - check if the MAC addr is valid
1548  * @priv: driver private structure
1549  * Description:
1550  * it verifies that the MAC address is valid; in case of failure it
1551  * generates a random MAC address.
1552  */
1553 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1554 {
1555         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1556                 priv->hw->mac->get_umac_addr(priv->hw,
1557                                              priv->dev->dev_addr, 0);
1558                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1559                         eth_hw_addr_random(priv->dev);
1560                 netdev_info(priv->dev, "device MAC address %pM\n",
1561                             priv->dev->dev_addr);
1562         }
1563 }
1564
1565 /**
1566  * stmmac_init_dma_engine - DMA init.
1567  * @priv: driver private structure
1568  * Description:
1569  * It initializes the DMA by invoking the specific MAC/GMAC callback.
1570  * Some DMA parameters can be passed from the platform;
1571  * if they are not passed, a default is kept for the MAC or GMAC.
1572  */
1573 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1574 {
1575         int atds = 0;
1576         int ret = 0;
1577
1578         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1579                 dev_err(priv->device, "Invalid DMA configuration\n");
1580                 return -EINVAL;
1581         }
1582
1583         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1584                 atds = 1;
1585
1586         ret = priv->hw->dma->reset(priv->ioaddr);
1587         if (ret) {
1588                 dev_err(priv->device, "Failed to reset the dma\n");
1589                 return ret;
1590         }
1591
1592         priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1593                             priv->dma_tx_phy, priv->dma_rx_phy, atds);
1594
1595         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1596                 priv->rx_tail_addr = priv->dma_rx_phy +
1597                             (DMA_RX_SIZE * sizeof(struct dma_desc));
1598                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1599                                                STMMAC_CHAN0);
1600
1601                 priv->tx_tail_addr = priv->dma_tx_phy +
1602                             (DMA_TX_SIZE * sizeof(struct dma_desc));
1603                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1604                                                STMMAC_CHAN0);
1605         }
1606
1607         if (priv->plat->axi && priv->hw->dma->axi)
1608                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1609
1610         return ret;
1611 }
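
/*
 * The tail pointers above point one descriptor past the end of each
 * ring: e.g. assuming the 16-byte basic descriptor (des0..des3) and a
 * 512-entry RX ring, rx_tail_addr = dma_rx_phy + 512 * 16, i.e. the
 * base plus 8 KiB.
 */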
1612
1613 /**
1614  * stmmac_tx_timer - mitigation sw timer for tx.
1615  * @data: data pointer
1616  * Description:
1617  * This is the timer handler that directly invokes stmmac_tx_clean.
1618  */
1619 static void stmmac_tx_timer(unsigned long data)
1620 {
1621         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1622
1623         stmmac_tx_clean(priv);
1624 }
1625
1626 /**
1627  * stmmac_init_tx_coalesce - init tx mitigation options.
1628  * @priv: driver private structure
1629  * Description:
1630  * This inits the transmit coalesce parameters: i.e. timer rate,
1631  * timer handler and default threshold used for enabling the
1632  * interrupt on completion bit.
1633  */
1634 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1635 {
1636         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1637         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1638         init_timer(&priv->txtimer);
1639         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1640         priv->txtimer.data = (unsigned long)priv;
1641         priv->txtimer.function = stmmac_tx_timer;
1642         add_timer(&priv->txtimer);
1643 }
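
/*
 * With this setup the IC bit is requested roughly once every
 * tx_coal_frames transmitted frames (see the mitigation logic in the
 * xmit paths below); if fewer frames are queued, the timer armed here
 * fires after the tx_coal_timer period and runs stmmac_tx_clean()
 * instead.
 */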
1644
1645 /**
1646  *  stmmac_set_tx_queue_weight - Set TX queue weight
1647  *  @priv: driver private structure
1648  *  Description: It is used for setting the TX queue weights
1649  */
1650 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
1651 {
1652         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1653         u32 weight;
1654         u32 queue;
1655
1656         for (queue = 0; queue < tx_queues_count; queue++) {
1657                 weight = priv->plat->tx_queues_cfg[queue].weight;
1658                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
1659         }
1660 }
1661
1662 /**
1663  *  stmmac_mtl_configuration - Configure MTL
1664  *  @priv: driver private structure
1665  *  Description: It is used for configuring the MTL
1666  */
1667 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
1668 {
1669         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1670         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1671
1672         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
1673                 stmmac_set_tx_queue_weight(priv);
1674
1675         /* Configure MTL RX algorithms */
1676         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
1677                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
1678                                                 priv->plat->rx_sched_algorithm);
1679
1680         /* Configure MTL TX algorithms */
1681         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
1682                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
1683                                                 priv->plat->tx_sched_algorithm);
1684
1685         /* Enable MAC RX Queues */
1686         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
1687                 stmmac_mac_enable_rx_queues(priv);
1688 }
1689
1690 /**
1691  * stmmac_hw_setup - setup mac in a usable state.
1692  *  @dev : pointer to the device structure.
1693  *  @init_ptp: initialize PTP if set
1694  *  Description: this is the main function to set up the HW in a usable
1695  *  state: the dma engine is reset, the core registers are configured
1696  *  (e.g. AXI, Checksum features, timers) and the DMA is made ready to
1697  *  start receiving and transmitting.
1698  *  Return value:
1699  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1700  *  file on failure.
1701  */
1702 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1703 {
1704         struct stmmac_priv *priv = netdev_priv(dev);
1705         int ret;
1706
1707         /* DMA initialization and SW reset */
1708         ret = stmmac_init_dma_engine(priv);
1709         if (ret < 0) {
1710                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1711                            __func__);
1712                 return ret;
1713         }
1714
1715         /* Copy the MAC addr into the HW  */
1716         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1717
1718         /* PS and related bits will be programmed according to the speed */
1719         if (priv->hw->pcs) {
1720                 int speed = priv->plat->mac_port_sel_speed;
1721
1722                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1723                     (speed == SPEED_1000)) {
1724                         priv->hw->ps = speed;
1725                 } else {
1726                         dev_warn(priv->device, "invalid port speed\n");
1727                         priv->hw->ps = 0;
1728                 }
1729         }
1730
1731         /* Initialize the MAC Core */
1732         priv->hw->mac->core_init(priv->hw, dev->mtu);
1733
1734         /* Initialize MTL */
1735         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1736                 stmmac_mtl_configuration(priv);
1737
1738         ret = priv->hw->mac->rx_ipc(priv->hw);
1739         if (!ret) {
1740                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
1741                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1742                 priv->hw->rx_csum = 0;
1743         }
1744
1745         /* Enable the MAC Rx/Tx */
1746         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1747                 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1748         else
1749                 stmmac_set_mac(priv->ioaddr, true);
1750
1751         /* Set the HW DMA mode and the COE */
1752         stmmac_dma_operation_mode(priv);
1753
1754         stmmac_mmc_setup(priv);
1755
1756         if (init_ptp) {
1757                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
1758                 if (ret < 0)
1759                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
1760
1761                 ret = stmmac_init_ptp(priv);
1762                 if (ret == -EOPNOTSUPP)
1763                         netdev_warn(priv->dev, "PTP not supported by HW\n");
1764                 else if (ret)
1765                         netdev_warn(priv->dev, "PTP init failed\n");
1766         }
1767
1768 #ifdef CONFIG_DEBUG_FS
1769         ret = stmmac_init_fs(dev);
1770         if (ret < 0)
1771                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1772                             __func__);
1773 #endif
1774         /* Start the ball rolling... */
1775         netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
1776         priv->hw->dma->start_tx(priv->ioaddr);
1777         priv->hw->dma->start_rx(priv->ioaddr);
1778
1779         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1780
1781         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1782                 priv->rx_riwt = MAX_DMA_RIWT;
1783                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1784         }
1785
1786         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1787                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1788
1789         /*  set TX ring length */
1790         if (priv->hw->dma->set_tx_ring_len)
1791                 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1792                                                (DMA_TX_SIZE - 1));
1793         /*  set RX ring length */
1794         if (priv->hw->dma->set_rx_ring_len)
1795                 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1796                                                (DMA_RX_SIZE - 1));
1797         /* Enable TSO */
1798         if (priv->tso)
1799                 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1800
1801         return 0;
1802 }
1803
1804 static void stmmac_hw_teardown(struct net_device *dev)
1805 {
1806         struct stmmac_priv *priv = netdev_priv(dev);
1807
1808         clk_disable_unprepare(priv->plat->clk_ptp_ref);
1809 }
1810
1811 /**
1812  *  stmmac_open - open entry point of the driver
1813  *  @dev : pointer to the device structure.
1814  *  Description:
1815  *  This function is the open entry point of the driver.
1816  *  Return value:
1817  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1818  *  file on failure.
1819  */
1820 static int stmmac_open(struct net_device *dev)
1821 {
1822         struct stmmac_priv *priv = netdev_priv(dev);
1823         int ret;
1824
1825         stmmac_check_ether_addr(priv);
1826
1827         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1828             priv->hw->pcs != STMMAC_PCS_TBI &&
1829             priv->hw->pcs != STMMAC_PCS_RTBI) {
1830                 ret = stmmac_init_phy(dev);
1831                 if (ret) {
1832                         netdev_err(priv->dev,
1833                                    "%s: Cannot attach to PHY (error: %d)\n",
1834                                    __func__, ret);
1835                         return ret;
1836                 }
1837         }
1838
1839         /* Extra statistics */
1840         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1841         priv->xstats.threshold = tc;
1842
1843         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1844         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1845
1846         ret = alloc_dma_desc_resources(priv);
1847         if (ret < 0) {
1848                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1849                            __func__);
1850                 goto dma_desc_error;
1851         }
1852
1853         ret = init_dma_desc_rings(dev, GFP_KERNEL);
1854         if (ret < 0) {
1855                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1856                            __func__);
1857                 goto init_error;
1858         }
1859
1860         ret = stmmac_hw_setup(dev, true);
1861         if (ret < 0) {
1862                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
1863                 goto init_error;
1864         }
1865
1866         stmmac_init_tx_coalesce(priv);
1867
1868         if (dev->phydev)
1869                 phy_start(dev->phydev);
1870
1871         /* Request the IRQ lines */
1872         ret = request_irq(dev->irq, stmmac_interrupt,
1873                           IRQF_SHARED, dev->name, dev);
1874         if (unlikely(ret < 0)) {
1875                 netdev_err(priv->dev,
1876                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1877                            __func__, dev->irq, ret);
1878                 goto irq_error;
1879         }
1880
1881         /* Request the Wake IRQ in case another line is used for WoL */
1882         if (priv->wol_irq != dev->irq) {
1883                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1884                                   IRQF_SHARED, dev->name, dev);
1885                 if (unlikely(ret < 0)) {
1886                         netdev_err(priv->dev,
1887                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1888                                    __func__, priv->wol_irq, ret);
1889                         goto wolirq_error;
1890                 }
1891         }
1892
1893         /* Request the LPI IRQ in case a separate line is used */
1894         if (priv->lpi_irq > 0) {
1895                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1896                                   dev->name, dev);
1897                 if (unlikely(ret < 0)) {
1898                         netdev_err(priv->dev,
1899                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1900                                    __func__, priv->lpi_irq, ret);
1901                         goto lpiirq_error;
1902                 }
1903         }
1904
1905         napi_enable(&priv->napi);
1906         netif_start_queue(dev);
1907
1908         return 0;
1909
1910 lpiirq_error:
1911         if (priv->wol_irq != dev->irq)
1912                 free_irq(priv->wol_irq, dev);
1913 wolirq_error:
1914         free_irq(dev->irq, dev);
1915 irq_error:
1916         if (dev->phydev)
1917                 phy_stop(dev->phydev);
1918
1919         del_timer_sync(&priv->txtimer);
1920         stmmac_hw_teardown(dev);
1921 init_error:
1922         free_dma_desc_resources(priv);
1923 dma_desc_error:
1924         if (dev->phydev)
1925                 phy_disconnect(dev->phydev);
1926
1927         return ret;
1928 }
1929
1930 /**
1931  *  stmmac_release - close entry point of the driver
1932  *  @dev : device pointer.
1933  *  Description:
1934  *  This is the stop entry point of the driver.
1935  */
1936 static int stmmac_release(struct net_device *dev)
1937 {
1938         struct stmmac_priv *priv = netdev_priv(dev);
1939
1940         if (priv->eee_enabled)
1941                 del_timer_sync(&priv->eee_ctrl_timer);
1942
1943         /* Stop and disconnect the PHY */
1944         if (dev->phydev) {
1945                 phy_stop(dev->phydev);
1946                 phy_disconnect(dev->phydev);
1947         }
1948
1949         netif_stop_queue(dev);
1950
1951         napi_disable(&priv->napi);
1952
1953         del_timer_sync(&priv->txtimer);
1954
1955         /* Free the IRQ lines */
1956         free_irq(dev->irq, dev);
1957         if (priv->wol_irq != dev->irq)
1958                 free_irq(priv->wol_irq, dev);
1959         if (priv->lpi_irq > 0)
1960                 free_irq(priv->lpi_irq, dev);
1961
1962         /* Stop TX/RX DMA and clear the descriptors */
1963         priv->hw->dma->stop_tx(priv->ioaddr);
1964         priv->hw->dma->stop_rx(priv->ioaddr);
1965
1966         /* Release and free the Rx/Tx resources */
1967         free_dma_desc_resources(priv);
1968
1969         /* Disable the MAC Rx/Tx */
1970         stmmac_set_mac(priv->ioaddr, false);
1971
1972         netif_carrier_off(dev);
1973
1974 #ifdef CONFIG_DEBUG_FS
1975         stmmac_exit_fs(dev);
1976 #endif
1977
1978         stmmac_release_ptp(priv);
1979
1980         return 0;
1981 }
1982
1983 /**
1984  *  stmmac_tso_allocator - allocate and fill TSO descriptors
1985  *  @priv: driver private structure
1986  *  @des: buffer start address
1987  *  @total_len: total length to fill in descriptors
1988  *  @last_segment: condition for the last descriptor
1989  *  Description:
1990  *  This function fills one descriptor and takes further descriptors as
1991  *  needed, according to the buffer length to fill.
1992  */
1993 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1994                                  int total_len, bool last_segment)
1995 {
1996         struct dma_desc *desc;
1997         int tmp_len;
1998         u32 buff_size;
1999
2000         tmp_len = total_len;
2001
2002         while (tmp_len > 0) {
2003                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2004                 desc = priv->dma_tx + priv->cur_tx;
2005
2006                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2007                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2008                             TSO_MAX_BUFF_SIZE : tmp_len;
2009
2010                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2011                         0, 1,
2012                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2013                         0, 0);
2014
2015                 tmp_len -= TSO_MAX_BUFF_SIZE;
2016         }
2017 }
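
/*
 * Worked example: with TSO_MAX_BUFF_SIZE = 16383 (SZ_16K - 1), a
 * total_len of 40000 bytes is spread over three descriptors carrying
 * 16383, 16383 and 7234 bytes; only the final, short chunk can get
 * the last-segment flag requested by the caller.
 */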
2018
2019 /**
2020  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2021  *  @skb : the socket buffer
2022  *  @dev : device pointer
2023  *  Description: this is the transmit function that is called on TSO frames
2024  *  (support available on GMAC4 and newer chips).
2025  *  The diagram below shows the ring programming in case of TSO frames:
2026  *
2027  *  First Descriptor
2028  *   --------
2029  *   | DES0 |---> buffer1 = L2/L3/L4 header
2030  *   | DES1 |---> TCP Payload (can continue on next descr...)
2031  *   | DES2 |---> buffer 1 and 2 len
2032  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2033  *   --------
2034  *      |
2035  *     ...
2036  *      |
2037  *   --------
2038  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2039  *   | DES1 | --|
2040  *   | DES2 | --> buffer 1 and 2 len
2041  *   | DES3 |
2042  *   --------
2043  *
2044  * mss is fixed when TSO is enabled, so no per-frame TDES3 ctx setup is needed.
2045  */
2046 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2047 {
2048         u32 pay_len, mss;
2049         int tmp_pay_len = 0;
2050         struct stmmac_priv *priv = netdev_priv(dev);
2051         int nfrags = skb_shinfo(skb)->nr_frags;
2052         unsigned int first_entry, des;
2053         struct dma_desc *desc, *first, *mss_desc = NULL;
2054         u8 proto_hdr_len;
2055         int i;
2056
2057         /* Compute header lengths */
2058         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2059
2060         /* Desc availability based on threshold should be safe enough */
2061         if (unlikely(stmmac_tx_avail(priv) <
2062                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2063                 if (!netif_queue_stopped(dev)) {
2064                         netif_stop_queue(dev);
2065                         /* This is a hard error, log it. */
2066                         netdev_err(priv->dev,
2067                                    "%s: Tx Ring full when queue awake\n",
2068                                    __func__);
2069                 }
2070                 return NETDEV_TX_BUSY;
2071         }
2072
2073         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2074
2075         mss = skb_shinfo(skb)->gso_size;
2076
2077         /* set new MSS value if needed */
2078         if (mss != priv->mss) {
2079                 mss_desc = priv->dma_tx + priv->cur_tx;
2080                 priv->hw->desc->set_mss(mss_desc, mss);
2081                 priv->mss = mss;
2082                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2083         }
2084
2085         if (netif_msg_tx_queued(priv)) {
2086                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2087                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2088                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2089                         skb->data_len);
2090         }
2091
2092         first_entry = priv->cur_tx;
2093
2094         desc = priv->dma_tx + first_entry;
2095         first = desc;
2096
2097         /* first descriptor: fill Headers on Buf1 */
2098         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2099                              DMA_TO_DEVICE);
2100         if (dma_mapping_error(priv->device, des))
2101                 goto dma_map_err;
2102
2103         priv->tx_skbuff_dma[first_entry].buf = des;
2104         priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2105         priv->tx_skbuff[first_entry] = skb;
2106
2107         first->des0 = cpu_to_le32(des);
2108
2109         /* Fill start of payload in buff2 of first descriptor */
2110         if (pay_len)
2111                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2112
2113         /* If needed take extra descriptors to fill the remaining payload */
2114         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2115
2116         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2117
2118         /* Prepare fragments */
2119         for (i = 0; i < nfrags; i++) {
2120                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2121
2122                 des = skb_frag_dma_map(priv->device, frag, 0,
2123                                        skb_frag_size(frag),
2124                                        DMA_TO_DEVICE);
2125                 if (dma_mapping_error(priv->device, des))
2126                         goto dma_map_err;
2127
2128                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2129                                      (i == nfrags - 1));
2130
2131                 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2132                 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2133                 priv->tx_skbuff[priv->cur_tx] = NULL;
2134                 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2135         }
2136
2137         priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2138
2139         priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2140
2141         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2142                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2143                           __func__);
2144                 netif_stop_queue(dev);
2145         }
2146
2147         dev->stats.tx_bytes += skb->len;
2148         priv->xstats.tx_tso_frames++;
2149         priv->xstats.tx_tso_nfrags += nfrags;
2150
2151         /* Manage tx mitigation */
2152         priv->tx_count_frames += nfrags + 1;
2153         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2154                 mod_timer(&priv->txtimer,
2155                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2156         } else {
2157                 priv->tx_count_frames = 0;
2158                 priv->hw->desc->set_tx_ic(desc);
2159                 priv->xstats.tx_set_ic_bit++;
2160         }
2161
2162         if (!priv->hwts_tx_en)
2163                 skb_tx_timestamp(skb);
2164
2165         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2166                      priv->hwts_tx_en)) {
2167                 /* declare that device is doing timestamping */
2168                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2169                 priv->hw->desc->enable_tx_timestamp(first);
2170         }
2171
2172         /* Complete the first descriptor before granting the DMA */
2173         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2174                         proto_hdr_len,
2175                         pay_len,
2176                         1, priv->tx_skbuff_dma[first_entry].last_segment,
2177                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2178
2179         /* If context desc is used to change MSS */
2180         if (mss_desc)
2181                 priv->hw->desc->set_tx_owner(mss_desc);
2182
2183         /* The own bit must be the latest setting done when preparing the
2184          * descriptor, and then a barrier is needed to make sure that
2185          * all is coherent before granting the DMA engine.
2186          */
2187         dma_wmb();
2188
2189         if (netif_msg_pktdata(priv)) {
2190                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2191                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2192                         priv->cur_tx, first, nfrags);
2193
2194                 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2195                                              0);
2196
2197                 pr_info(">>> frame to be transmitted: ");
2198                 print_pkt(skb->data, skb_headlen(skb));
2199         }
2200
2201         netdev_sent_queue(dev, skb->len);
2202
2203         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2204                                        STMMAC_CHAN0);
2205
2206         return NETDEV_TX_OK;
2207
2208 dma_map_err:
2209         dev_err(priv->device, "Tx dma map failed\n");
2210         dev_kfree_skb(skb);
2211         priv->dev->stats.tx_dropped++;
2212         return NETDEV_TX_OK;
2213 }
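
/*
 * A sketch of the availability check at the top of stmmac_tso_xmit():
 * the payload needs (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1
 * descriptors. E.g. a 64 KiB GSO packet with an (assumed) 54-byte
 * Ethernet/IP/TCP header needs (65536 - 54) / 16383 + 1 = 4 free
 * entries; with fewer available the queue is stopped.
 */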
2214
2215 /**
2216  *  stmmac_xmit - Tx entry point of the driver
2217  *  @skb : the socket buffer
2218  *  @dev : device pointer
2219  *  Description : this is the tx entry point of the driver.
2220  *  It programs the chain or the ring and supports oversized frames
2221  *  and SG feature.
2222  */
2223 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2224 {
2225         struct stmmac_priv *priv = netdev_priv(dev);
2226         unsigned int nopaged_len = skb_headlen(skb);
2227         int i, csum_insertion = 0, is_jumbo = 0;
2228         int nfrags = skb_shinfo(skb)->nr_frags;
2229         unsigned int entry, first_entry;
2230         struct dma_desc *desc, *first;
2231         unsigned int enh_desc;
2232         unsigned int des;
2233
2234         /* Manage oversized TCP frames for GMAC4 device */
2235         if (skb_is_gso(skb) && priv->tso) {
2236                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2237                         return stmmac_tso_xmit(skb, dev);
2238         }
2239
2240         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2241                 if (!netif_queue_stopped(dev)) {
2242                         netif_stop_queue(dev);
2243                         /* This is a hard error, log it. */
2244                         netdev_err(priv->dev,
2245                                    "%s: Tx Ring full when queue awake\n",
2246                                    __func__);
2247                 }
2248                 return NETDEV_TX_BUSY;
2249         }
2250
2251         if (priv->tx_path_in_lpi_mode)
2252                 stmmac_disable_eee_mode(priv);
2253
2254         entry = priv->cur_tx;
2255         first_entry = entry;
2256
2257         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2258
2259         if (likely(priv->extend_desc))
2260                 desc = (struct dma_desc *)(priv->dma_etx + entry);
2261         else
2262                 desc = priv->dma_tx + entry;
2263
2264         first = desc;
2265
2266         priv->tx_skbuff[first_entry] = skb;
2267
2268         enh_desc = priv->plat->enh_desc;
2269         /* To program the descriptors according to the size of the frame */
2270         if (enh_desc)
2271                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2272
2273         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2274                                          DWMAC_CORE_4_00)) {
2275                 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2276                 if (unlikely(entry < 0))
2277                         goto dma_map_err;
2278         }
2279
2280         for (i = 0; i < nfrags; i++) {
2281                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2282                 int len = skb_frag_size(frag);
2283                 bool last_segment = (i == (nfrags - 1));
2284
2285                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2286
2287                 if (likely(priv->extend_desc))
2288                         desc = (struct dma_desc *)(priv->dma_etx + entry);
2289                 else
2290                         desc = priv->dma_tx + entry;
2291
2292                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2293                                        DMA_TO_DEVICE);
2294                 if (dma_mapping_error(priv->device, des))
2295                         goto dma_map_err; /* should reuse desc w/o issues */
2296
2297                 priv->tx_skbuff[entry] = NULL;
2298
2299                 priv->tx_skbuff_dma[entry].buf = des;
2300                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2301                         desc->des0 = cpu_to_le32(des);
2302                 else
2303                         desc->des2 = cpu_to_le32(des);
2304
2305                 priv->tx_skbuff_dma[entry].map_as_page = true;
2306                 priv->tx_skbuff_dma[entry].len = len;
2307                 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2308
2309                 /* Prepare the descriptor and set the own bit too */
2310                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2311                                                 priv->mode, 1, last_segment);
2312         }
2313
2314         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2315
2316         priv->cur_tx = entry;
2317
2318         if (netif_msg_pktdata(priv)) {
2319                 void *tx_head;
2320
2321                 netdev_dbg(priv->dev,
2322                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2323                            __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2324                            entry, first, nfrags);
2325
2326                 if (priv->extend_desc)
2327                         tx_head = (void *)priv->dma_etx;
2328                 else
2329                         tx_head = (void *)priv->dma_tx;
2330
2331                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2332
2333                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2334                 print_pkt(skb->data, skb->len);
2335         }
2336
2337         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2338                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2339                           __func__);
2340                 netif_stop_queue(dev);
2341         }
2342
2343         dev->stats.tx_bytes += skb->len;
2344
2345         /* According to the coalesce parameter the IC bit for the latest
2346          * segment is reset and the timer re-started to clean the tx status.
2347          * This approach takes care of the fragments: desc is the first
2348          * element in case of no SG.
2349          */
2350         priv->tx_count_frames += nfrags + 1;
2351         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2352                 mod_timer(&priv->txtimer,
2353                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2354         } else {
2355                 priv->tx_count_frames = 0;
2356                 priv->hw->desc->set_tx_ic(desc);
2357                 priv->xstats.tx_set_ic_bit++;
2358         }
2359
2360         if (!priv->hwts_tx_en)
2361                 skb_tx_timestamp(skb);
2362
2363         /* Ready to fill the first descriptor and set the OWN bit w/o any
2364          * problems because all the descriptors are actually ready to be
2365          * passed to the DMA engine.
2366          */
2367         if (likely(!is_jumbo)) {
2368                 bool last_segment = (nfrags == 0);
2369
2370                 des = dma_map_single(priv->device, skb->data,
2371                                      nopaged_len, DMA_TO_DEVICE);
2372                 if (dma_mapping_error(priv->device, des))
2373                         goto dma_map_err;
2374
2375                 priv->tx_skbuff_dma[first_entry].buf = des;
2376                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2377                         first->des0 = cpu_to_le32(des);
2378                 else
2379                         first->des2 = cpu_to_le32(des);
2380
2381                 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2382                 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2383
2384                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2385                              priv->hwts_tx_en)) {
2386                         /* declare that device is doing timestamping */
2387                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2388                         priv->hw->desc->enable_tx_timestamp(first);
2389                 }
2390
2391                 /* Prepare the first descriptor setting the OWN bit too */
2392                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2393                                                 csum_insertion, priv->mode, 1,
2394                                                 last_segment);
2395
2396                 /* The own bit must be the latest setting done when preparing the
2397                  * descriptor, and then a barrier is needed to make sure that
2398                  * all is coherent before granting the DMA engine.
2399                  */
2400                 dma_wmb();
2401         }
2402
2403         netdev_sent_queue(dev, skb->len);
2404
2405         if (priv->synopsys_id < DWMAC_CORE_4_00)
2406                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2407         else
2408                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2409                                                STMMAC_CHAN0);
2410
2411         return NETDEV_TX_OK;
2412
2413 dma_map_err:
2414         netdev_err(priv->dev, "Tx DMA map failed\n");
2415         dev_kfree_skb(skb);
2416         priv->dev->stats.tx_dropped++;
2417         return NETDEV_TX_OK;
2418 }
2419
2420 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2421 {
2422         struct ethhdr *ehdr;
2423         u16 vlanid;
2424
2425         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2426             NETIF_F_HW_VLAN_CTAG_RX &&
2427             !__vlan_get_tag(skb, &vlanid)) {
2428                 /* pop the vlan tag */
2429                 ehdr = (struct ethhdr *)skb->data;
2430                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2431                 skb_pull(skb, VLAN_HLEN);
2432                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2433         }
2434 }
2435
2436
2437 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2438 {
2439         if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2440                 return 0;
2441
2442         return 1;
2443 }
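
/*
 * rx_zeroc_thresh provides a simple hysteresis: stmmac_rx_refill()
 * sets it to STMMAC_RX_THRESH after an skb allocation failure and
 * decrements it on successful refills, so the check above keeps the
 * RX path copying into fresh skbs until the pressure has drained.
 */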
2444
2445 /**
2446  * stmmac_rx_refill - refill the used preallocated skb buffers
2447  * @priv: driver private structure
2448  * Description: this reallocates the skbs for the reception process
2449  * that is based on zero-copy.
2450  */
2451 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2452 {
2453         int bfsize = priv->dma_buf_sz;
2454         unsigned int entry = priv->dirty_rx;
2455         int dirty = stmmac_rx_dirty(priv);
2456
2457         while (dirty-- > 0) {
2458                 struct dma_desc *p;
2459
2460                 if (priv->extend_desc)
2461                         p = (struct dma_desc *)(priv->dma_erx + entry);
2462                 else
2463                         p = priv->dma_rx + entry;
2464
2465                 if (likely(priv->rx_skbuff[entry] == NULL)) {
2466                         struct sk_buff *skb;
2467
2468                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2469                         if (unlikely(!skb)) {
2470                                 /* so for a while no zero-copy! */
2471                                 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2472                                 if (unlikely(net_ratelimit()))
2473                                         dev_err(priv->device,
2474                                                 "fail to alloc skb entry %d\n",
2475                                                 entry);
2476                                 break;
2477                         }
2478
2479                         priv->rx_skbuff[entry] = skb;
2480                         priv->rx_skbuff_dma[entry] =
2481                             dma_map_single(priv->device, skb->data, bfsize,
2482                                            DMA_FROM_DEVICE);
2483                         if (dma_mapping_error(priv->device,
2484                                               priv->rx_skbuff_dma[entry])) {
2485                                 netdev_err(priv->dev, "Rx DMA map failed\n");
2486                                 dev_kfree_skb(skb);
2487                                 break;
2488                         }
2489
2490                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2491                                 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2492                                 p->des1 = 0;
2493                         } else {
2494                                 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2495                         }
2496                         if (priv->hw->mode->refill_desc3)
2497                                 priv->hw->mode->refill_desc3(priv, p);
2498
2499                         if (priv->rx_zeroc_thresh > 0)
2500                                 priv->rx_zeroc_thresh--;
2501
2502                         netif_dbg(priv, rx_status, priv->dev,
2503                                   "refill entry #%d\n", entry);
2504                 }
2505                 dma_wmb();
2506
2507                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2508                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2509                 else
2510                         priv->hw->desc->set_rx_owner(p);
2511
2512                 dma_wmb();
2513
2514                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2515         }
2516         priv->dirty_rx = entry;
2517 }
2518
2519 /**
2520  * stmmac_rx - manage the receive process
2521  * @priv: driver private structure
2522  * @limit: napi budget.
2523  * Description: this is the function called by the napi poll method.
2524  * It gets all the frames inside the ring.
2525  */
2526 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2527 {
2528         unsigned int entry = priv->cur_rx;
2529         unsigned int next_entry;
2530         unsigned int count = 0;
2531         int coe = priv->hw->rx_csum;
2532
2533         if (netif_msg_rx_status(priv)) {
2534                 void *rx_head;
2535
2536                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2537                 if (priv->extend_desc)
2538                         rx_head = (void *)priv->dma_erx;
2539                 else
2540                         rx_head = (void *)priv->dma_rx;
2541
2542                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2543         }
2544         while (count < limit) {
2545                 int status;
2546                 struct dma_desc *p;
2547                 struct dma_desc *np;
2548
2549                 if (priv->extend_desc)
2550                         p = (struct dma_desc *)(priv->dma_erx + entry);
2551                 else
2552                         p = priv->dma_rx + entry;
2553
2554                 /* read the status of the incoming frame */
2555                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2556                                                    &priv->xstats, p);
2557                 /* check if it is still managed by the DMA; otherwise go ahead */
2558                 if (unlikely(status & dma_own))
2559                         break;
2560
2561                 count++;
2562
2563                 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2564                 next_entry = priv->cur_rx;
2565
2566                 if (priv->extend_desc)
2567                         np = (struct dma_desc *)(priv->dma_erx + next_entry);
2568                 else
2569                         np = priv->dma_rx + next_entry;
2570
2571                 prefetch(np);
2572
2573                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2574                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2575                                                            &priv->xstats,
2576                                                            priv->dma_erx +
2577                                                            entry);
2578                 if (unlikely(status == discard_frame)) {
2579                         priv->dev->stats.rx_errors++;
2580                         if (priv->hwts_rx_en && !priv->extend_desc) {
2581                                 /* DESC2 & DESC3 will be overwritten by device
2582                                  * with timestamp value, hence reinitialize
2583                                  * them in stmmac_rx_refill() function so that
2584                                  * device can reuse it.
2585                                  */
2586                                 priv->rx_skbuff[entry] = NULL;
2587                                 dma_unmap_single(priv->device,
2588                                                  priv->rx_skbuff_dma[entry],
2589                                                  priv->dma_buf_sz,
2590                                                  DMA_FROM_DEVICE);
2591                         }
2592                 } else {
2593                         struct sk_buff *skb;
2594                         int frame_len;
2595                         unsigned int des;
2596
2597                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2598                                 des = le32_to_cpu(p->des0);
2599                         else
2600                                 des = le32_to_cpu(p->des2);
2601
2602                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2603
2604                         /*  If frame length is greater than skb buffer size
2605                          *  (preallocated during init) then the packet is
2606                          *  ignored
2607                          */
2608                         if (frame_len > priv->dma_buf_sz) {
2609                                 netdev_err(priv->dev,
2610                                            "len %d larger than size (%d)\n",
2611                                            frame_len, priv->dma_buf_sz);
2612                                 priv->dev->stats.rx_length_errors++;
2613                                 break;
2614                         }
2615
2616                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2617                          * Type frames (LLC/LLC-SNAP)
2618                          */
2619                         if (unlikely(status != llc_snap))
2620                                 frame_len -= ETH_FCS_LEN;
2621
2622                         if (netif_msg_rx_status(priv)) {
2623                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2624                                            p, entry, des);
2625                                 if (frame_len > ETH_FRAME_LEN)
2626                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2627                                                    frame_len, status);
2628                         }
2629
2630                         /* Zero-copy is always used, whatever the size,
2631                          * in case of GMAC4 because the used descriptors
2632                          * always need to be refilled.
2633                          */
2634                         if (unlikely(!priv->plat->has_gmac4 &&
2635                                      ((frame_len < priv->rx_copybreak) ||
2636                                      stmmac_rx_threshold_count(priv)))) {
2637                                 skb = netdev_alloc_skb_ip_align(priv->dev,
2638                                                                 frame_len);
2639                                 if (unlikely(!skb)) {
2640                                         if (net_ratelimit())
2641                                                 dev_warn(priv->device,
2642                                                          "packet dropped\n");
2643                                         priv->dev->stats.rx_dropped++;
2644                                         break;
2645                                 }
2646
2647                                 dma_sync_single_for_cpu(priv->device,
2648                                                         priv->rx_skbuff_dma
2649                                                         [entry], frame_len,
2650                                                         DMA_FROM_DEVICE);
2651                                 skb_copy_to_linear_data(skb,
2652                                                         priv->
2653                                                         rx_skbuff[entry]->data,
2654                                                         frame_len);
2655
2656                                 skb_put(skb, frame_len);
2657                                 dma_sync_single_for_device(priv->device,
2658                                                            priv->rx_skbuff_dma
2659                                                            [entry], frame_len,
2660                                                            DMA_FROM_DEVICE);
2661                         } else {
2662                                 skb = priv->rx_skbuff[entry];
2663                                 if (unlikely(!skb)) {
2664                                         netdev_err(priv->dev,
2665                                                    "%s: Inconsistent Rx chain\n",
2666                                                    __func__);
2667                                         priv->dev->stats.rx_dropped++;
2668                                         break;
2669                                 }
2670                                 prefetch(skb->data - NET_IP_ALIGN);
2671                                 priv->rx_skbuff[entry] = NULL;
2672                                 priv->rx_zeroc_thresh++;
2673
2674                                 skb_put(skb, frame_len);
2675                                 dma_unmap_single(priv->device,
2676                                                  priv->rx_skbuff_dma[entry],
2677                                                  priv->dma_buf_sz,
2678                                                  DMA_FROM_DEVICE);
2679                         }
2680
2681                         if (netif_msg_pktdata(priv)) {
2682                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
2683                                            frame_len);
2684                                 print_pkt(skb->data, frame_len);
2685                         }
2686
2687                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
2688
2689                         stmmac_rx_vlan(priv->dev, skb);
2690
2691                         skb->protocol = eth_type_trans(skb, priv->dev);
2692
2693                         if (unlikely(!coe))
2694                                 skb_checksum_none_assert(skb);
2695                         else
2696                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2697
2698                         napi_gro_receive(&priv->napi, skb);
2699
2700                         priv->dev->stats.rx_packets++;
2701                         priv->dev->stats.rx_bytes += frame_len;
2702                 }
2703                 entry = next_entry;
2704         }
2705
2706         stmmac_rx_refill(priv);
2707
2708         priv->xstats.rx_pkt_n += count;
2709
2710         return count;
2711 }
2712
2713 /**
2714  *  stmmac_poll - stmmac poll method (NAPI)
2715  *  @napi : pointer to the napi structure.
2716  *  @budget : maximum number of packets that the current CPU can receive from
2717  *            all interfaces.
2718  *  Description :
2719  *  Process received frames and clean up completed Tx resources.
2720  */
2721 static int stmmac_poll(struct napi_struct *napi, int budget)
2722 {
2723         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2724         int work_done = 0;
2725
2726         priv->xstats.napi_poll++;
2727         stmmac_tx_clean(priv);
2728
2729         work_done = stmmac_rx(priv, budget);
2730         if (work_done < budget) {
2731                 napi_complete_done(napi, work_done);
2732                 stmmac_enable_dma_irq(priv);
2733         }
2734         return work_done;
2735 }
2736
2737 /**
2738  *  stmmac_tx_timeout
2739  *  @dev : Pointer to net device structure
2740  *  Description: this function is called when a packet transmission fails to
2741  *   complete within a reasonable time. The driver will mark the error in the
2742  *   netdev structure and arrange for the device to be reset to a sane state
2743  *   in order to transmit a new packet.
2744  */
2745 static void stmmac_tx_timeout(struct net_device *dev)
2746 {
2747         struct stmmac_priv *priv = netdev_priv(dev);
2748
2749         /* Clear Tx resources and restart transmitting again */
2750         stmmac_tx_err(priv);
2751 }
2752
2753 /**
2754  *  stmmac_set_rx_mode - entry point for multicast addressing
2755  *  @dev : pointer to the device structure
2756  *  Description:
2757  *  This function is a driver entry point which gets called by the kernel
2758  *  whenever multicast addresses must be enabled/disabled.
2759  *  Return value:
2760  *  void.
2761  */
2762 static void stmmac_set_rx_mode(struct net_device *dev)
2763 {
2764         struct stmmac_priv *priv = netdev_priv(dev);
2765
2766         priv->hw->mac->set_filter(priv->hw, dev);
2767 }
2768
2769 /**
2770  *  stmmac_change_mtu - entry point to change MTU size for the device.
2771  *  @dev : device pointer.
2772  *  @new_mtu : the new MTU size for the device.
2773  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
2774  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2775  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2776  *  Return value:
2777  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2778  *  file on failure.
2779  */
2780 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2781 {
2782         struct stmmac_priv *priv = netdev_priv(dev);
2783
2784         if (netif_running(dev)) {
2785                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
2786                 return -EBUSY;
2787         }
2788
2789         dev->mtu = new_mtu;
2790
2791         netdev_update_features(dev);
2792
2793         return 0;
2794 }
2795
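/**
 *  stmmac_fix_features - adjust the advertised features to the HW limits
 *  @dev : device pointer.
 *  @features: feature set requested by the stack.
 *  Description: masks out RX/TX checksum offload when the platform does not
 *  support it (or jumbo frames are in use on buggy HW) and caches the
 *  requested TSO state in the private structure.
 */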
2796 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2797                                              netdev_features_t features)
2798 {
2799         struct stmmac_priv *priv = netdev_priv(dev);
2800
2801         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2802                 features &= ~NETIF_F_RXCSUM;
2803
2804         if (!priv->plat->tx_coe)
2805                 features &= ~NETIF_F_CSUM_MASK;
2806
2807         /* Some GMAC devices have buggy Jumbo frame support and
2808          * need Tx COE disabled for oversized frames (due to
2809          * limited buffer sizes). In this case we disable TX csum
2810          * insertion in the TDES and do not use Store-and-Forward.
2811          */
2812         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2813                 features &= ~NETIF_F_CSUM_MASK;
2814
2815         /* Enable or disable TSO as requested via ethtool */
2816         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2817                 if (features & NETIF_F_TSO)
2818                         priv->tso = true;
2819                 else
2820                         priv->tso = false;
2821         }
2822
2823         return features;
2824 }
2825
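/**
 *  stmmac_set_features - apply the new feature set to the HW
 *  @netdev: device pointer.
 *  @features: feature set accepted by the stack.
 *  Description: caches the RX COE type in priv->hw->rx_csum and reprograms
 *  the MAC IPC (checksum offload) block accordingly.
 */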
2826 static int stmmac_set_features(struct net_device *netdev,
2827                                netdev_features_t features)
2828 {
2829         struct stmmac_priv *priv = netdev_priv(netdev);
2830
2831         /* Keep the COE type if RX checksumming is supported */
2832         if (features & NETIF_F_RXCSUM)
2833                 priv->hw->rx_csum = priv->plat->rx_coe;
2834         else
2835                 priv->hw->rx_csum = 0;
2836         /* No check needed here: rx_coe was validated earlier and has
2837          * already been fixed up if there was an issue.
2838          */
2839         priv->hw->mac->rx_ipc(priv->hw);
2840
2841         return 0;
2842 }
2843
2844 /**
2845  *  stmmac_interrupt - main ISR
2846  *  @irq: interrupt number.
2847  *  @dev_id: to pass the net device pointer.
2848  *  Description: this is the main driver interrupt service routine.
2849  *  It can call:
2850  *  o DMA service routine (to manage incoming frame reception and transmission
2851  *    status)
2852  *  o Core interrupts to manage: remote wake-up, management counter, LPI
2853  *    interrupts.
2854  */
2855 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2856 {
2857         struct net_device *dev = (struct net_device *)dev_id;
2858         struct stmmac_priv *priv;
2859
2860         if (unlikely(!dev)) {
2861                 pr_err("%s: invalid dev pointer\n", __func__);
2862                 return IRQ_NONE;
2863         }
2864         priv = netdev_priv(dev);
2865         if (priv->irq_wake)
2866                 pm_wakeup_event(priv->device, 0);
2867
2868         /* To handle the GMAC core's own interrupts */
2869         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2870                 int status = priv->hw->mac->host_irq_status(priv->hw,
2871                                                             &priv->xstats);
2872                 if (unlikely(status)) {
2873                         /* For LPI we need to save the tx status */
2874                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2875                                 priv->tx_path_in_lpi_mode = true;
2876                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2877                                 priv->tx_path_in_lpi_mode = false;
2878                         if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2879                                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2880                                                         priv->rx_tail_addr,
2881                                                         STMMAC_CHAN0);
2882                 }
2883
2884                 /* PCS link status */
2885                 if (priv->hw->pcs) {
2886                         if (priv->xstats.pcs_link)
2887                                 netif_carrier_on(dev);
2888                         else
2889                                 netif_carrier_off(dev);
2890                 }
2891         }
2892
2893         /* To handle DMA interrupts */
2894         stmmac_dma_interrupt(priv);
2895
2896         return IRQ_HANDLED;
2897 }
2898
2899 #ifdef CONFIG_NET_POLL_CONTROLLER
2900 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2901  * to allow network I/O with interrupts disabled.
2902  */
2903 static void stmmac_poll_controller(struct net_device *dev)
2904 {
2905         disable_irq(dev->irq);
2906         stmmac_interrupt(dev->irq, dev);
2907         enable_irq(dev->irq);
2908 }
2909 #endif
2910
2911 /**
2912  *  stmmac_ioctl - Entry point for the Ioctl
2913  *  @dev: Device pointer.
2914  *  @rq: An IOCTL specific structure that can contain a pointer to
2915  *  a proprietary structure used to pass information to the driver.
2916  *  @cmd: IOCTL command
2917  *  Description:
2918  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2919  */
2920 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2921 {
2922         int ret = -EOPNOTSUPP;
2923
2924         if (!netif_running(dev))
2925                 return -EINVAL;
2926
2927         switch (cmd) {
2928         case SIOCGMIIPHY:
2929         case SIOCGMIIREG:
2930         case SIOCSMIIREG:
2931                 if (!dev->phydev)
2932                         return -EINVAL;
2933                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
2934                 break;
2935         case SIOCSHWTSTAMP:
2936                 ret = stmmac_hwtstamp_ioctl(dev, rq);
2937                 break;
2938         default:
2939                 break;
2940         }
2941
2942         return ret;
2943 }
2944
2945 #ifdef CONFIG_DEBUG_FS
2946 static struct dentry *stmmac_fs_dir;
2947
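/* Dump a descriptor ring to a seq_file, one descriptor per line; handles
 * both the basic and the extended descriptor layouts (the head pointer is
 * interpreted according to @extend_desc).
 */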
2948 static void sysfs_display_ring(void *head, int size, int extend_desc,
2949                                struct seq_file *seq)
2950 {
2951         int i;
2952         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2953         struct dma_desc *p = (struct dma_desc *)head;
2954
2955         for (i = 0; i < size; i++) {
2956                 if (extend_desc) {
2957                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2958                                    i, (unsigned int)virt_to_phys(ep),
2959                                    le32_to_cpu(ep->basic.des0),
2960                                    le32_to_cpu(ep->basic.des1),
2961                                    le32_to_cpu(ep->basic.des2),
2962                                    le32_to_cpu(ep->basic.des3));
2963                         ep++;
2964                 } else {
2965                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2966                                    i, (unsigned int)virt_to_phys(p),
2967                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2968                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2969                         p++;
2970                 }
2971                 seq_puts(seq, "\n");
2972         }
2973 }
2974
2975 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2976 {
2977         struct net_device *dev = seq->private;
2978         struct stmmac_priv *priv = netdev_priv(dev);
2979
2980         if (priv->extend_desc) {
2981                 seq_printf(seq, "Extended RX descriptor ring:\n");
2982                 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
2983                 seq_printf(seq, "Extended TX descriptor ring:\n");
2984                 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2985         } else {
2986                 seq_printf(seq, "RX descriptor ring:\n");
2987                 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
2988                 seq_printf(seq, "TX descriptor ring:\n");
2989                 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2990         }
2991
2992         return 0;
2993 }
2994
2995 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2996 {
2997         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2998 }
2999
3000 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3001
3002 static const struct file_operations stmmac_rings_status_fops = {
3003         .owner = THIS_MODULE,
3004         .open = stmmac_sysfs_ring_open,
3005         .read = seq_read,
3006         .llseek = seq_lseek,
3007         .release = single_release,
3008 };
3009
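/* Report the DMA HW feature register (cached in priv->dma_cap at probe
 * time via stmmac_hw_init()) as a human-readable list in debugfs.
 */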
3010 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3011 {
3012         struct net_device *dev = seq->private;
3013         struct stmmac_priv *priv = netdev_priv(dev);
3014
3015         if (!priv->hw_cap_support) {
3016                 seq_printf(seq, "DMA HW features not supported\n");
3017                 return 0;
3018         }
3019
3020         seq_printf(seq, "==============================\n");
3021         seq_printf(seq, "\tDMA HW features\n");
3022         seq_printf(seq, "==============================\n");
3023
3024         seq_printf(seq, "\t10/100 Mbps: %s\n",
3025                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3026         seq_printf(seq, "\t1000 Mbps: %s\n",
3027                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3028         seq_printf(seq, "\tHalf duplex: %s\n",
3029                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3030         seq_printf(seq, "\tHash Filter: %s\n",
3031                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3032         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3033                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3034         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3035                    (priv->dma_cap.pcs) ? "Y" : "N");
3036         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3037                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3038         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3039                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3040         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3041                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3042         seq_printf(seq, "\tRMON module: %s\n",
3043                    (priv->dma_cap.rmon) ? "Y" : "N");
3044         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3045                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3046         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3047                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3048         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3049                    (priv->dma_cap.eee) ? "Y" : "N");
3050         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3051         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3052                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3053         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3054                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3055                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3056         } else {
3057                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3058                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3059                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3060                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3061         }
3062         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3063                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3064         seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3065                    priv->dma_cap.number_rx_channel);
3066         seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3067                    priv->dma_cap.number_tx_channel);
3068         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3069                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3070
3071         return 0;
3072 }
3073
3074 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3075 {
3076         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3077 }
3078
3079 static const struct file_operations stmmac_dma_cap_fops = {
3080         .owner = THIS_MODULE,
3081         .open = stmmac_sysfs_dma_cap_open,
3082         .read = seq_read,
3083         .llseek = seq_lseek,
3084         .release = single_release,
3085 };
3086
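/**
 *  stmmac_init_fs - create the per-device debugfs entries
 *  @dev : device pointer.
 *  Description: creates a per-netdev directory under the driver debugfs
 *  root with two read-only files: "descriptors_status" (DMA RX/TX rings)
 *  and "dma_cap" (DMA HW feature report). Returns 0 or -ENOMEM.
 */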
3087 static int stmmac_init_fs(struct net_device *dev)
3088 {
3089         struct stmmac_priv *priv = netdev_priv(dev);
3090
3091         /* Create per netdev entries */
3092         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3093
3094         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3095                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3096
3097                 return -ENOMEM;
3098         }
3099
3100         /* Entry to report DMA RX/TX rings */
3101         priv->dbgfs_rings_status =
3102                 debugfs_create_file("descriptors_status", S_IRUGO,
3103                                     priv->dbgfs_dir, dev,
3104                                     &stmmac_rings_status_fops);
3105
3106         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3107                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3108                 debugfs_remove_recursive(priv->dbgfs_dir);
3109
3110                 return -ENOMEM;
3111         }
3112
3113         /* Entry to report the DMA HW features */
3114         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3115                                             priv->dbgfs_dir,
3116                                             dev, &stmmac_dma_cap_fops);
3117
3118         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3119                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3120                 debugfs_remove_recursive(priv->dbgfs_dir);
3121
3122                 return -ENOMEM;
3123         }
3124
3125         return 0;
3126 }
3127
3128 static void stmmac_exit_fs(struct net_device *dev)
3129 {
3130         struct stmmac_priv *priv = netdev_priv(dev);
3131
3132         debugfs_remove_recursive(priv->dbgfs_dir);
3133 }
3134 #endif /* CONFIG_DEBUG_FS */
3135
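/* Driver entry points hooked into the core network stack; the table is
 * attached to the net_device and registered in stmmac_dvr_probe().
 */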
3136 static const struct net_device_ops stmmac_netdev_ops = {
3137         .ndo_open = stmmac_open,
3138         .ndo_start_xmit = stmmac_xmit,
3139         .ndo_stop = stmmac_release,
3140         .ndo_change_mtu = stmmac_change_mtu,
3141         .ndo_fix_features = stmmac_fix_features,
3142         .ndo_set_features = stmmac_set_features,
3143         .ndo_set_rx_mode = stmmac_set_rx_mode,
3144         .ndo_tx_timeout = stmmac_tx_timeout,
3145         .ndo_do_ioctl = stmmac_ioctl,
3146 #ifdef CONFIG_NET_POLL_CONTROLLER
3147         .ndo_poll_controller = stmmac_poll_controller,
3148 #endif
3149         .ndo_set_mac_address = eth_mac_addr,
3150 };
3151
3152 /**
3153  *  stmmac_hw_init - Init the MAC device
3154  *  @priv: driver private structure
3155  *  Description: this function is to configure the MAC device according to
3156  *  some platform parameters or the HW capability register. It prepares the
3157  *  driver to use either ring or chain modes and to setup either enhanced or
3158  *  normal descriptors.
3159  */
3160 static int stmmac_hw_init(struct stmmac_priv *priv)
3161 {
3162         struct mac_device_info *mac;
3163
3164         /* Identify the MAC HW device */
3165         if (priv->plat->has_gmac) {
3166                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3167                 mac = dwmac1000_setup(priv->ioaddr,
3168                                       priv->plat->multicast_filter_bins,
3169                                       priv->plat->unicast_filter_entries,
3170                                       &priv->synopsys_id);
3171         } else if (priv->plat->has_gmac4) {
3172                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3173                 mac = dwmac4_setup(priv->ioaddr,
3174                                    priv->plat->multicast_filter_bins,
3175                                    priv->plat->unicast_filter_entries,
3176                                    &priv->synopsys_id);
3177         } else {
3178                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3179         }
3180         if (!mac)
3181                 return -ENOMEM;
3182
3183         priv->hw = mac;
3184
3185         /* To use the chained or ring mode */
3186         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3187                 priv->hw->mode = &dwmac4_ring_mode_ops;
3188         } else {
3189                 if (chain_mode) {
3190                         priv->hw->mode = &chain_mode_ops;
3191                         dev_info(priv->device, "Chain mode enabled\n");
3192                         priv->mode = STMMAC_CHAIN_MODE;
3193                 } else {
3194                         priv->hw->mode = &ring_mode_ops;
3195                         dev_info(priv->device, "Ring mode enabled\n");
3196                         priv->mode = STMMAC_RING_MODE;
3197                 }
3198         }
3199
3200         /* Get the HW capability register (GMAC cores newer than 3.50a) */
3201         priv->hw_cap_support = stmmac_get_hw_features(priv);
3202         if (priv->hw_cap_support) {
3203                 dev_info(priv->device, "DMA HW capability register supported\n");
3204
3205                 /* We can override some gmac/dma configuration fields
3206                  * passed through the platform (e.g. enh_desc, tx_coe)
3207                  * with the values from the HW capability register
3208                  * (if supported).
3209                  */
3210                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3211                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3212                 priv->hw->pmt = priv->plat->pmt;
3213
3214                 /* TXCOE doesn't work in thresh DMA mode */
3215                 if (priv->plat->force_thresh_dma_mode)
3216                         priv->plat->tx_coe = 0;
3217                 else
3218                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3219
3220                 /* In case of GMAC4 rx_coe is from HW cap register. */
3221                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3222
3223                 if (priv->dma_cap.rx_coe_type2)
3224                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3225                 else if (priv->dma_cap.rx_coe_type1)
3226                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3227
3228         } else {
3229                 dev_info(priv->device, "No HW DMA feature register supported\n");
3230         }
3231
3232         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3233         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3234                 priv->hw->desc = &dwmac4_desc_ops;
3235         else
3236                 stmmac_selec_desc_mode(priv);
3237
3238         if (priv->plat->rx_coe) {
3239                 priv->hw->rx_csum = priv->plat->rx_coe;
3240                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3241                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3242                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3243         }
3244         if (priv->plat->tx_coe)
3245                 dev_info(priv->device, "TX Checksum insertion supported\n");
3246
3247         if (priv->plat->pmt) {
3248                 dev_info(priv->device, "Wake-up On LAN supported\n");
3249                 device_set_wakeup_capable(priv->device, 1);
3250         }
3251
3252         if (priv->dma_cap.tsoen)
3253                 dev_info(priv->device, "TSO supported\n");
3254
3255         return 0;
3256 }
3257
3258 /**
3259  * stmmac_dvr_probe
3260  * @device: device pointer
3261  * @plat_dat: platform data pointer
3262  * @res: stmmac resource pointer
3263  * Description: this is the main probe function, used to allocate the
3264  * net_device via alloc_etherdev and to set up the private structure.
3265  * Return:
3266  * returns 0 on success, otherwise errno.
3267  */
3268 int stmmac_dvr_probe(struct device *device,
3269                      struct plat_stmmacenet_data *plat_dat,
3270                      struct stmmac_resources *res)
3271 {
3272         int ret = 0;
3273         struct net_device *ndev = NULL;
3274         struct stmmac_priv *priv;
3275
3276         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3277         if (!ndev)
3278                 return -ENOMEM;
3279
3280         SET_NETDEV_DEV(ndev, device);
3281
3282         priv = netdev_priv(ndev);
3283         priv->device = device;
3284         priv->dev = ndev;
3285
3286         stmmac_set_ethtool_ops(ndev);
3287         priv->pause = pause;
3288         priv->plat = plat_dat;
3289         priv->ioaddr = res->addr;
3290         priv->dev->base_addr = (unsigned long)res->addr;
3291
3292         priv->dev->irq = res->irq;
3293         priv->wol_irq = res->wol_irq;
3294         priv->lpi_irq = res->lpi_irq;
3295
3296         if (res->mac)
3297                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3298
3299         dev_set_drvdata(device, priv->dev);
3300
3301         /* Verify driver arguments */
3302         stmmac_verify_args();
3303
3304         /* Override with kernel parameters if supplied XXX CRS XXX
3305          * this needs to have multiple instances
3306          */
3307         if ((phyaddr >= 0) && (phyaddr <= 31))
3308                 priv->plat->phy_addr = phyaddr;
3309
3310         if (priv->plat->stmmac_rst)
3311                 reset_control_deassert(priv->plat->stmmac_rst);
3312
3313         /* Init MAC and get the capabilities */
3314         ret = stmmac_hw_init(priv);
3315         if (ret)
3316                 goto error_hw_init;
3317
3318         ndev->netdev_ops = &stmmac_netdev_ops;
3319
3320         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3321                             NETIF_F_RXCSUM;
3322
3323         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3324                 ndev->hw_features |= NETIF_F_TSO;
3325                 priv->tso = true;
3326                 dev_info(priv->device, "TSO feature enabled\n");
3327         }
3328         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3329         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3330 #ifdef STMMAC_VLAN_TAG_USED
3331         /* Both mac100 and gmac support receive VLAN tag detection */
3332         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3333 #endif
3334         priv->msg_enable = netif_msg_init(debug, default_msg_level);
3335
3336         /* MTU range: 46 - hw-specific max */
3337         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3338         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3339                 ndev->max_mtu = JUMBO_LEN;
3340         else
3341                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3342         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
3343          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
3344          */
3345         if ((priv->plat->maxmtu < ndev->max_mtu) &&
3346             (priv->plat->maxmtu >= ndev->min_mtu))
3347                 ndev->max_mtu = priv->plat->maxmtu;
3348         else if (priv->plat->maxmtu < ndev->min_mtu)
3349                 dev_warn(priv->device,
3350                          "%s: warning: maxmtu having invalid value (%d)\n",
3351                          __func__, priv->plat->maxmtu);
3352
3353         if (flow_ctrl)
3354                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
3355
3356         /* The Rx watchdog is available on cores newer than 3.40.
3357          * In some cases, e.g. on buggy HW, this feature has to be
3358          * disabled; this can be done by passing the riwt_off field
3359          * from the platform.
3360          */
3361         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3362                 priv->use_riwt = 1;
3363                 dev_info(priv->device,
3364                          "Enable RX Mitigation via HW Watchdog Timer\n");
3365         }
3366
3367         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3368
3369         spin_lock_init(&priv->lock);
3370
3371         /* If a specific clk_csr value is passed from the platform
3372          * this means that the CSR Clock Range selection cannot be
3373          * changed at run-time and is fixed. Otherwise the driver
3374          * will try to set the MDC clock dynamically according to
3375          * the actual csr clock input.
3376          */
3377         if (!priv->plat->clk_csr)
3378                 stmmac_clk_csr_set(priv);
3379         else
3380                 priv->clk_csr = priv->plat->clk_csr;
3381
3382         stmmac_check_pcs_mode(priv);
3383
3384         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
3385             priv->hw->pcs != STMMAC_PCS_TBI &&
3386             priv->hw->pcs != STMMAC_PCS_RTBI) {
3387                 /* MDIO bus Registration */
3388                 ret = stmmac_mdio_register(ndev);
3389                 if (ret < 0) {
3390                         dev_err(priv->device,
3391                                 "%s: MDIO bus (id: %d) registration failed\n",
3392                                 __func__, priv->plat->bus_id);
3393                         goto error_mdio_register;
3394                 }
3395         }
3396
3397         ret = register_netdev(ndev);
3398         if (ret) {
3399                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3400                         __func__, ret);
3401                 goto error_netdev_register;
3402         }
3403
3404         return ret;
3405
3406 error_netdev_register:
3407         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3408             priv->hw->pcs != STMMAC_PCS_TBI &&
3409             priv->hw->pcs != STMMAC_PCS_RTBI)
3410                 stmmac_mdio_unregister(ndev);
3411 error_mdio_register:
3412         netif_napi_del(&priv->napi);
3413 error_hw_init:
3414         free_netdev(ndev);
3415
3416         return ret;
3417 }
3418 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
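/* Sketch of how a bus glue driver is expected to call the probe above.
 * Field names match struct stmmac_resources as used in this file; the
 * platform boilerplate (ioremap, IRQ lookup, plat_dat setup) is assumed
 * and not shown:
 *
 *      struct stmmac_resources res = {};
 *
 *      res.addr = base;             // ioremapped MAC register window
 *      res.irq = irq;               // main MAC interrupt
 *      res.wol_irq = wol_irq;       // Wake-on-LAN IRQ (may equal irq)
 *      res.lpi_irq = lpi_irq;       // EEE/LPI IRQ, if wired
 *      ret = stmmac_dvr_probe(dev, plat_dat, &res);
 */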
3419
3420 /**
3421  * stmmac_dvr_remove
3422  * @dev: device pointer
3423  * Description: this function resets the TX/RX processes, disables the MAC
3424  * RX/TX, changes the link status and releases the DMA descriptor rings.
3425  */
3426 int stmmac_dvr_remove(struct device *dev)
3427 {
3428         struct net_device *ndev = dev_get_drvdata(dev);
3429         struct stmmac_priv *priv = netdev_priv(ndev);
3430
3431         netdev_info(priv->dev, "%s: removing driver\n", __func__);
3432
3433         priv->hw->dma->stop_rx(priv->ioaddr);
3434         priv->hw->dma->stop_tx(priv->ioaddr);
3435
3436         stmmac_set_mac(priv->ioaddr, false);
3437         netif_carrier_off(ndev);
3438         unregister_netdev(ndev);
3439         if (priv->plat->stmmac_rst)
3440                 reset_control_assert(priv->plat->stmmac_rst);
3441         clk_disable_unprepare(priv->plat->pclk);
3442         clk_disable_unprepare(priv->plat->stmmac_clk);
3443         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3444             priv->hw->pcs != STMMAC_PCS_TBI &&
3445             priv->hw->pcs != STMMAC_PCS_RTBI)
3446                 stmmac_mdio_unregister(ndev);
3447         free_netdev(ndev);
3448
3449         return 0;
3450 }
3451 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3452
3453 /**
3454  * stmmac_suspend - suspend callback
3455  * @dev: device pointer
3456  * Description: this function suspends the device; it is called by the
3457  * platform driver to stop the network queue, release the resources,
3458  * program the PMT register (for WoL) and clean up driver resources.
3459  */
3460 int stmmac_suspend(struct device *dev)
3461 {
3462         struct net_device *ndev = dev_get_drvdata(dev);
3463         struct stmmac_priv *priv = netdev_priv(ndev);
3464         unsigned long flags;
3465
3466         if (!ndev || !netif_running(ndev))
3467                 return 0;
3468
3469         if (ndev->phydev)
3470                 phy_stop(ndev->phydev);
3471
3472         spin_lock_irqsave(&priv->lock, flags);
3473
3474         netif_device_detach(ndev);
3475         netif_stop_queue(ndev);
3476
3477         napi_disable(&priv->napi);
3478
3479         /* Stop TX/RX DMA */
3480         priv->hw->dma->stop_tx(priv->ioaddr);
3481         priv->hw->dma->stop_rx(priv->ioaddr);
3482
3483         /* Enable Power down mode by programming the PMT regs */
3484         if (device_may_wakeup(priv->device)) {
3485                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3486                 priv->irq_wake = 1;
3487         } else {
3488                 stmmac_set_mac(priv->ioaddr, false);
3489                 pinctrl_pm_select_sleep_state(priv->device);
3490                 /* Disable the clocks in case PWM is off */
3491                 clk_disable(priv->plat->pclk);
3492                 clk_disable(priv->plat->stmmac_clk);
3493         }
3494         spin_unlock_irqrestore(&priv->lock, flags);
3495
3496         priv->oldlink = 0;
3497         priv->speed = SPEED_UNKNOWN;
3498         priv->oldduplex = DUPLEX_UNKNOWN;
3499         return 0;
3500 }
3501 EXPORT_SYMBOL_GPL(stmmac_suspend);
3502
3503 /**
3504  * stmmac_resume - resume callback
3505  * @dev: device pointer
3506  * Description: this function is invoked on resume to set up the DMA and
3507  * core in a usable state.
3508  */
3509 int stmmac_resume(struct device *dev)
3510 {
3511         struct net_device *ndev = dev_get_drvdata(dev);
3512         struct stmmac_priv *priv = netdev_priv(ndev);
3513         unsigned long flags;
3514
3515         if (!netif_running(ndev))
3516                 return 0;
3517
3518         /* The Power Down bit in the PMT register is cleared
3519          * automatically as soon as a magic packet or a Wake-up frame
3520          * is received. Still, it's better to clear this bit manually
3521          * because it can cause problems when the resume is triggered
3522          * by another device (e.g. serial console).
3523          */
3524         if (device_may_wakeup(priv->device)) {
3525                 spin_lock_irqsave(&priv->lock, flags);
3526                 priv->hw->mac->pmt(priv->hw, 0);
3527                 spin_unlock_irqrestore(&priv->lock, flags);
3528                 priv->irq_wake = 0;
3529         } else {
3530                 pinctrl_pm_select_default_state(priv->device);
3531                 /* Enable the clocks previously disabled */
3532                 clk_enable(priv->plat->stmmac_clk);
3533                 clk_enable(priv->plat->pclk);
3534                 /* reset the phy so that it's ready */
3535                 if (priv->mii)
3536                         stmmac_mdio_reset(priv->mii);
3537         }
3538
3539         netif_device_attach(ndev);
3540
3541         spin_lock_irqsave(&priv->lock, flags);
3542
3543         priv->cur_rx = 0;
3544         priv->dirty_rx = 0;
3545         priv->dirty_tx = 0;
3546         priv->cur_tx = 0;
3547         /* reset private mss value to force mss context settings at
3548          * next tso xmit (only used for gmac4).
3549          */
3550         priv->mss = 0;
3551
3552         stmmac_clear_descriptors(priv);
3553
3554         stmmac_hw_setup(ndev, false);
3555         stmmac_init_tx_coalesce(priv);
3556         stmmac_set_rx_mode(ndev);
3557
3558         napi_enable(&priv->napi);
3559
3560         netif_start_queue(ndev);
3561
3562         spin_unlock_irqrestore(&priv->lock, flags);
3563
3564         if (ndev->phydev)
3565                 phy_start(ndev->phydev);
3566
3567         return 0;
3568 }
3569 EXPORT_SYMBOL_GPL(stmmac_resume);
3570
3571 #ifndef MODULE
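/* Parse the built-in "stmmaceth=" kernel command line. Options are given
 * as comma-separated "name:value" pairs matching the module parameters,
 * e.g. (hypothetical values):
 *
 *      stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */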
3572 static int __init stmmac_cmdline_opt(char *str)
3573 {
3574         char *opt;
3575
3576         if (!str || !*str)
3577                 return -EINVAL;
3578         while ((opt = strsep(&str, ",")) != NULL) {
3579                 if (!strncmp(opt, "debug:", 6)) {
3580                         if (kstrtoint(opt + 6, 0, &debug))
3581                                 goto err;
3582                 } else if (!strncmp(opt, "phyaddr:", 8)) {
3583                         if (kstrtoint(opt + 8, 0, &phyaddr))
3584                                 goto err;
3585                 } else if (!strncmp(opt, "buf_sz:", 7)) {
3586                         if (kstrtoint(opt + 7, 0, &buf_sz))
3587                                 goto err;
3588                 } else if (!strncmp(opt, "tc:", 3)) {
3589                         if (kstrtoint(opt + 3, 0, &tc))
3590                                 goto err;
3591                 } else if (!strncmp(opt, "watchdog:", 9)) {
3592                         if (kstrtoint(opt + 9, 0, &watchdog))
3593                                 goto err;
3594                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3595                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3596                                 goto err;
3597                 } else if (!strncmp(opt, "pause:", 6)) {
3598                         if (kstrtoint(opt + 6, 0, &pause))
3599                                 goto err;
3600                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3601                         if (kstrtoint(opt + 10, 0, &eee_timer))
3602                                 goto err;
3603                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3604                         if (kstrtoint(opt + 11, 0, &chain_mode))
3605                                 goto err;
3606                 }
3607         }
3608         return 0;
3609
3610 err:
3611         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3612         return -EINVAL;
3613 }
3614
3615 __setup("stmmaceth=", stmmac_cmdline_opt);
3616 #endif /* MODULE */
3617
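/* Module init: create the shared debugfs root directory that the
 * per-device directories from stmmac_init_fs() are placed under
 * (/sys/kernel/debug/stmmaceth by default).
 */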
3618 static int __init stmmac_init(void)
3619 {
3620 #ifdef CONFIG_DEBUG_FS
3621         /* Create debugfs main directory if it doesn't exist yet */
3622         if (!stmmac_fs_dir) {
3623                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3624
3625                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3626                         pr_err("ERROR %s, debugfs create directory failed\n",
3627                                STMMAC_RESOURCE_NAME);
3628
3629                         return -ENOMEM;
3630                 }
3631         }
3632 #endif
3633
3634         return 0;
3635 }
3636
3637 static void __exit stmmac_exit(void)
3638 {
3639 #ifdef CONFIG_DEBUG_FS
3640         debugfs_remove_recursive(stmmac_fs_dir);
3641 #endif
3642 }
3643
3644 module_init(stmmac_init)
3645 module_exit(stmmac_exit)
3646
3647 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3648 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3649 MODULE_LICENSE("GPL");