/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
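
/* For illustration (this assumes the usual DMA_TX_SIZE/DMA_RX_SIZE of 512
 * descriptors; see the DMA_*_SIZE definitions in the driver headers): both
 * thresholds would evaluate to 128 descriptors.
 */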

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but the user can force use of the chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
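
/* Illustrative usage of the parameters above when the driver is built as a
 * module (the values here are examples only):
 *
 *	modprobe stmmac buf_sz=2048 eee_timer=2000 chain_mode=1
 *
 * When built in, the same knobs can be set on the kernel command line,
 * e.g. stmmac.buf_sz=2048.
 */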

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Otherwise the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* The platform-provided default clk_csr is assumed valid in all
         * cases except the ones handled below. For rates higher than the
         * IEEE 802.3 specified frequency we cannot estimate the proper
         * divider, since the frequency of clk_csr_i is unknown, so the
         * default divider is left unchanged.
         */
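        /* Illustrative mapping (the divider values are core-dependent; see
         * the dwmac headers): a 50 MHz csr clock falls in the 35-60 MHz
         * range, so STMMAC_CSR_35_60M is selected, which on many dwmac
         * cores programs an MDC divider of 26, i.e. MDC ~= 1.9 MHz, below
         * the 2.5 MHz limit of IEEE 802.3.
         */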
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
}

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
        u32 avail;

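        /* One descriptor is kept unused so that cur_tx == dirty_tx always
         * means "ring empty". Worked example (assuming DMA_TX_SIZE = 512):
         * with cur_tx = 5 and dirty_tx = 2, entries 2..4 are still owned by
         * the DMA, so avail = 512 - 5 + 2 - 1 = 508.
         */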
        if (priv->dirty_tx > priv->cur_tx)
                avail = priv->dirty_tx - priv->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

        return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
        u32 dirty;

        if (priv->dirty_rx <= priv->cur_rx)
                dirty = priv->cur_rx - priv->dirty_rx;
        else
                dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

        return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies the conditions and enters LPI mode
 * when EEE is enabled.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        /* Check and enter in LPI mode */
        if ((priv->dirty_tx == priv->cur_tx) &&
            (priv->tx_path_in_lpi_mode == false))
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        unsigned long flags;
        bool ret = false;

        /* When using the PCS we cannot access the PHY registers at this
         * stage, so extra features such as EEE are not supported.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* Handle the case where EEE can no longer be
                         * supported at run time (for example because the
                         * link partner capabilities have changed). In that
                         * case the driver disables its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        setup_timer(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer,
                                    (unsigned long)priv);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (!priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;

        if (!priv->hwts_rx_en)
                return;

        /* Check if timestamp is available */
        if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
                /* For GMAC4, the valid timestamp is from CTX next desc. */
                if (priv->plat->has_gmac4)
                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
                else
                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_err(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
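/* Illustrative user-space usage (a sketch, not part of this driver; the
 * interface name "eth0" and the socket fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */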
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.1AS, any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.1AS, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.1AS, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);

                /* Calculate the default addend value:
                 * addend = (2^32) * freq_div_ratio / clk_ptp_rate,
                 * where freq_div_ratio = 10^9 ns / sec_inc.
                 */
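                /* Worked example (illustrative numbers only): with
                 * clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns,
                 * freq_div_ratio = 10^9 / 20 = 50 MHz and
                 * addend = 2^32 * 50e6 / 62.5e6 ~= 0xCCCCCCCD.
                 */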
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2 by
 * looking at the HW capability register. It also registers the PTP driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the negotiated
 * speed and duplex this driver can also invoke registered glue-logic.
 * It also invokes the EEE initialization, because the device may have been
 * attached to a different (EEE-capable) network.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int new_state = 0;
        unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
                                                 fc, pause_time);

                if (phydev->speed != priv->speed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4)
                                        ctrl &= ~priv->hw->link.port;
                                break;
                        case 100:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl |= priv->hw->link.speed;
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        case 10:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl &= ~(priv->hw->link.speed);
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the adjust-link hook,
                 * in case a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this verifies whether the HW supports the PCS.
 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = 0;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);
        return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        void *head_rx, *head_tx;

        if (priv->extend_desc) {
                head_rx = (void *)priv->dma_erx;
                head_tx = (void *)priv->dma_etx;
        } else {
                head_rx = (void *)priv->dma_rx;
                head_tx = (void *)priv->dma_tx;
        }

        /* Display Rx ring */
        priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        /* Display Tx ring */
        priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

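/* Map the MTU to a DMA buffer size. For example, an MTU of 3000 needs
 * BUF_SIZE_4KiB, while the default 1500-byte MTU fits in DEFAULT_BUFSIZE
 * (1536 bytes).
 */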
static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        int i;

        /* Clear the Rx/Tx descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags)
{
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
        else
                p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
        if (priv->rx_skbuff[i]) {
                dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_skbuff[i]);
        }
        priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int bfsize = 0;
        int ret = -ENOMEM;

        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        netif_dbg(priv, probe, priv->dev,
                  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
                  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (i = 0; i < DMA_RX_SIZE; i++) {
                struct dma_desc *p;
                if (priv->extend_desc)
                        p = &((priv->dma_erx + i)->basic);
                else
                        p = priv->dma_rx + i;

                ret = stmmac_init_rx_buffers(priv, p, i, flags);
                if (ret)
                        goto err_init_rx_buffers;

                netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
                          priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
                          (unsigned int)priv->rx_skbuff_dma[i]);
        }
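        /* If the loop completed, i == DMA_RX_SIZE here, so the dirty_rx
         * computation below intentionally evaluates to 0.
         */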
        priv->cur_rx = 0;
        priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
        buf_sz = bfsize;

        /* Setup the chained descriptor addresses */
        if (priv->mode == STMMAC_CHAIN_MODE) {
                if (priv->extend_desc) {
                        priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 1);
                        priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 1);
                } else {
                        priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 0);
                        priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 0);
                }
        }

        /* TX INITIALIZATION */
        for (i = 0; i < DMA_TX_SIZE; i++) {
                struct dma_desc *p;
                if (priv->extend_desc)
                        p = &((priv->dma_etx + i)->basic);
                else
                        p = priv->dma_tx + i;

                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
                        p->des0 = 0;
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
                } else {
                        p->des2 = 0;
                }

                priv->tx_skbuff_dma[i].buf = 0;
                priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff_dma[i].len = 0;
                priv->tx_skbuff_dma[i].last_segment = false;
                priv->tx_skbuff[i] = NULL;
        }

        priv->dirty_tx = 0;
        priv->cur_tx = 0;
        netdev_reset_queue(priv->dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return 0;
err_init_rx_buffers:
        while (--i >= 0)
                stmmac_free_rx_buffers(priv, i);
        return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
                stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_TX_SIZE; i++) {
                if (priv->tx_skbuff_dma[i].buf) {
                        if (priv->tx_skbuff_dma[i].map_as_page)
                                dma_unmap_page(priv->device,
                                               priv->tx_skbuff_dma[i].buf,
                                               priv->tx_skbuff_dma[i].len,
                                               DMA_TO_DEVICE);
                        else
                                dma_unmap_single(priv->device,
                                                 priv->tx_skbuff_dma[i].buf,
                                                 priv->tx_skbuff_dma[i].len,
                                                 DMA_TO_DEVICE);
                }

                if (priv->tx_skbuff[i]) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                        priv->tx_skbuff_dma[i].buf = 0;
                        priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: depending on which descriptor type is in use (extended or
 * basic), this function allocates the resources for the TX and RX paths.
 * On the reception side, for example, it pre-allocates the RX socket
 * buffers to allow a zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
        int ret = -ENOMEM;

        priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
                                            GFP_KERNEL);
        if (!priv->rx_skbuff_dma)
                return -ENOMEM;

        priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;

        priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
                                            sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;

        priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skbuff;

        if (priv->extend_desc) {
                priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                    sizeof(struct dma_extended_desc),
                                                    &priv->dma_rx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_erx)
                        goto err_dma;

                priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                    sizeof(struct dma_extended_desc),
                                                    &priv->dma_tx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_etx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_extended_desc),
                                          priv->dma_erx, priv->dma_rx_phy);
                        goto err_dma;
                }
        } else {
                priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_rx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_rx)
                        goto err_dma;

                priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_tx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_tx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_desc),
                                          priv->dma_rx, priv->dma_rx_phy);
                        goto err_dma;
                }
        }

        return 0;

err_dma:
        kfree(priv->tx_skbuff);
err_tx_skbuff:
        kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
        kfree(priv->rx_skbuff);
err_rx_skbuff:
        kfree(priv->rx_skbuff_dma);
        return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
        /* Release the DMA TX/RX socket buffers */
        dma_free_rx_skbufs(priv);
        dma_free_tx_skbufs(priv);

        /* Free DMA regions of consistent memory previously allocated */
        if (!priv->extend_desc) {
                dma_free_coherent(priv->device,
                                  DMA_TX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_tx, priv->dma_tx_phy);
                dma_free_coherent(priv->device,
                                  DMA_RX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_rx, priv->dma_rx_phy);
        } else {
                dma_free_coherent(priv->device, DMA_TX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_etx, priv->dma_tx_phy);
                dma_free_coherent(priv->device, DMA_RX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_erx, priv->dma_rx_phy);
        }
        kfree(priv->rx_skbuff_dma);
        kfree(priv->rx_skbuff);
        kfree(priv->tx_skbuff_dma);
        kfree(priv->tx_skbuff);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
        int rx_count = priv->dma_cap.number_rx_queues;
        int queue = 0;

        /* If GMAC does not have multiple queues, then this is not necessary */
        if (rx_count == 1)
                return;

        /* If the core is synthesized with multiple rx queues / multiple
         * dma channels, then rx queues will be disabled by default.
         * For now only rx queue 0 is enabled.
         */
        priv->hw->mac->rx_queue_enable(priv->hw, queue);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
        int rxfifosz = priv->plat->rx_fifo_size;

        if (rxfifosz == 0)
                rxfifosz = priv->dma_cap.rx_fifo_size;

        if (priv->plat->force_thresh_dma_mode)
                priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
        else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
                /*
                 * In case of GMAC, SF mode can be enabled
                 * to perform the TX COE in HW. This depends on:
                 * 1) TX COE being actually supported;
                 * 2) there being no buggy Jumbo frame support
                 *    that requires not inserting the csum in the TDES.
                 */
                priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
                                        rxfifosz);
                priv->xstats.threshold = SF_DMA_MODE;
        } else
                priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
                                        rxfifosz);
}
1304
1305 /**
1306  * stmmac_tx_clean - to manage the transmission completion
1307  * @priv: driver private structure
1308  * Description: it reclaims the transmit resources after transmission completes.
1309  */
1310 static void stmmac_tx_clean(struct stmmac_priv *priv)
1311 {
1312         unsigned int bytes_compl = 0, pkts_compl = 0;
1313         unsigned int entry = priv->dirty_tx;
1314
1315         netif_tx_lock(priv->dev);
1316
1317         priv->xstats.tx_clean++;
1318
1319         while (entry != priv->cur_tx) {
1320                 struct sk_buff *skb = priv->tx_skbuff[entry];
1321                 struct dma_desc *p;
1322                 int status;
1323
1324                 if (priv->extend_desc)
1325                         p = (struct dma_desc *)(priv->dma_etx + entry);
1326                 else
1327                         p = priv->dma_tx + entry;
1328
1329                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1330                                                       &priv->xstats, p,
1331                                                       priv->ioaddr);
1332                 /* Check if the descriptor is owned by the DMA */
1333                 if (unlikely(status & tx_dma_own))
1334                         break;
1335
1336                 /* Just consider the last segment and ... */
1337                 if (likely(!(status & tx_not_ls))) {
1338                         /* ... verify the status error condition */
1339                         if (unlikely(status & tx_err)) {
1340                                 priv->dev->stats.tx_errors++;
1341                         } else {
1342                                 priv->dev->stats.tx_packets++;
1343                                 priv->xstats.tx_pkt_n++;
1344                         }
1345                         stmmac_get_tx_hwtstamp(priv, p, skb);
1346                 }
1347
1348                 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1349                         if (priv->tx_skbuff_dma[entry].map_as_page)
1350                                 dma_unmap_page(priv->device,
1351                                                priv->tx_skbuff_dma[entry].buf,
1352                                                priv->tx_skbuff_dma[entry].len,
1353                                                DMA_TO_DEVICE);
1354                         else
1355                                 dma_unmap_single(priv->device,
1356                                                  priv->tx_skbuff_dma[entry].buf,
1357                                                  priv->tx_skbuff_dma[entry].len,
1358                                                  DMA_TO_DEVICE);
1359                         priv->tx_skbuff_dma[entry].buf = 0;
1360                         priv->tx_skbuff_dma[entry].len = 0;
1361                         priv->tx_skbuff_dma[entry].map_as_page = false;
1362                 }
1363
1364                 if (priv->hw->mode->clean_desc3)
1365                         priv->hw->mode->clean_desc3(priv, p);
1366
1367                 priv->tx_skbuff_dma[entry].last_segment = false;
1368                 priv->tx_skbuff_dma[entry].is_jumbo = false;
1369
1370                 if (likely(skb != NULL)) {
1371                         pkts_compl++;
1372                         bytes_compl += skb->len;
1373                         dev_consume_skb_any(skb);
1374                         priv->tx_skbuff[entry] = NULL;
1375                 }
1376
1377                 priv->hw->desc->release_tx_desc(p, priv->mode);
1378
1379                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1380         }
1381         priv->dirty_tx = entry;
1382
1383         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1384
1385         if (unlikely(netif_queue_stopped(priv->dev) &&
1386             stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1387                 netif_dbg(priv, tx_done, priv->dev,
1388                           "%s: restart transmit\n", __func__);
1389                 netif_wake_queue(priv->dev);
1390         }
1391
1392         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1393                 stmmac_enable_eee_mode(priv);
1394                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1395         }
1396         netif_tx_unlock(priv->dev);
1397 }
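
/*
 * Worked example for the reclaim loop above (illustrative, with a ring of
 * DMA_TX_SIZE = 512 entries): if dirty_tx = 510 and cur_tx = 2, the loop
 * visits entries 510, 511, 0 and 1; STMMAC_GET_ENTRY() provides the
 * modulo-DMA_TX_SIZE wrap-around, and the walk stops early if it meets a
 * descriptor still owned by the DMA (tx_dma_own).
 */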
1398
1399 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1400 {
1401         priv->hw->dma->enable_dma_irq(priv->ioaddr);
1402 }
1403
1404 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1405 {
1406         priv->hw->dma->disable_dma_irq(priv->ioaddr);
1407 }
1408
1409 /**
1410  * stmmac_tx_err - to manage the tx error
1411  * @priv: driver private structure
1412  * Description: it cleans the descriptors and restarts the transmission
1413  * in case of transmission errors.
1414  */
1415 static void stmmac_tx_err(struct stmmac_priv *priv)
1416 {
1417         int i;
1418         netif_stop_queue(priv->dev);
1419
1420         priv->hw->dma->stop_tx(priv->ioaddr);
1421         dma_free_tx_skbufs(priv);
1422         for (i = 0; i < DMA_TX_SIZE; i++)
1423                 if (priv->extend_desc)
1424                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1425                                                      priv->mode,
1426                                                      (i == DMA_TX_SIZE - 1));
1427                 else
1428                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1429                                                      priv->mode,
1430                                                      (i == DMA_TX_SIZE - 1));
1431         priv->dirty_tx = 0;
1432         priv->cur_tx = 0;
1433         netdev_reset_queue(priv->dev);
1434         priv->hw->dma->start_tx(priv->ioaddr);
1435
1436         priv->dev->stats.tx_errors++;
1437         netif_wake_queue(priv->dev);
1438 }
1439
1440 /**
1441  * stmmac_dma_interrupt - DMA ISR
1442  * @priv: driver private structure
1443  * Description: this is the DMA ISR. It is called by the main ISR.
1444  * It calls the dwmac DMA routine and schedules the NAPI poll method when
1445  * there is work to be done.
1446  */
1447 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1448 {
1449         int status;
1450         int rxfifosz = priv->plat->rx_fifo_size;
1451
1452         status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1453         if (likely((status & handle_rx)) || (status & handle_tx)) {
1454                 if (likely(napi_schedule_prep(&priv->napi))) {
1455                         stmmac_disable_dma_irq(priv);
1456                         __napi_schedule(&priv->napi);
1457                 }
1458         }
1459         if (unlikely(status & tx_hard_error_bump_tc)) {
1460                 /* Try to bump up the dma threshold on this failure */
1461                 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1462                     (tc <= 256)) {
1463                         tc += 64;
1464                         if (priv->plat->force_thresh_dma_mode)
1465                                 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1466                                                         rxfifosz);
1467                         else
1468                                 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1469                                                         SF_DMA_MODE, rxfifosz);
1470                         priv->xstats.threshold = tc;
1471                 }
1472         } else if (unlikely(status == tx_hard_error))
1473                 stmmac_tx_err(priv);
1474 }
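
/*
 * Example of the threshold bump above: starting from the default (tc = 64),
 * successive tx_hard_error_bump_tc events reprogram the TX threshold in
 * steps of 64 (128, 192, ...) until tc passes 256; after that, or when
 * already operating in SF_DMA_MODE, no further bump is attempted.
 */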
1475
1476 /**
1477  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
1478  * @priv: driver private structure
1479  * Description: this masks the MMC irq because the counters are managed in SW.
1480  */
1481 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1482 {
1483         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1484                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1485
1486         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1487                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1488                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1489         } else {
1490                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1491                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1492         }
1493
1494         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1495
1496         if (priv->dma_cap.rmon) {
1497                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1498                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1499         } else
1500                 netdev_info(priv->dev, "No MAC Management Counters available\n");
1501 }
1502
1503 /**
1504  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
1505  * @priv: driver private structure
1506  * Description: select the Enhanced/Alternate or Normal descriptors.
1507  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1508  * supported by the HW capability register.
1509  */
1510 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1511 {
1512         if (priv->plat->enh_desc) {
1513                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1514
1515                 /* GMAC older than 3.50 has no extended descriptors */
1516                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1517                         dev_info(priv->device, "Enabled extended descriptors\n");
1518                         priv->extend_desc = 1;
1519                 } else
1520                         dev_warn(priv->device, "Extended descriptors not supported\n");
1521
1522                 priv->hw->desc = &enh_desc_ops;
1523         } else {
1524                 dev_info(priv->device, "Normal descriptors\n");
1525                 priv->hw->desc = &ndesc_ops;
1526         }
1527 }
1528
1529 /**
1530  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1531  * @priv: driver private structure
1532  * Description:
1533  *  newer GMAC chip generations have a register that indicates the
1534  *  presence of the optional features/functions.
1535  *  It can also be used to override the values passed through the
1536  *  platform, which are still needed for old MAC10/100 and GMAC chips.
1537  */
1538 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1539 {
1540         u32 ret = 0;
1541
1542         if (priv->hw->dma->get_hw_feature) {
1543                 priv->hw->dma->get_hw_feature(priv->ioaddr,
1544                                               &priv->dma_cap);
1545                 ret = 1;
1546         }
1547
1548         return ret;
1549 }
1550
1551 /**
1552  * stmmac_check_ether_addr - check if the MAC addr is valid
1553  * @priv: driver private structure
1554  * Description:
1555  * it verifies that the MAC address is valid; if it is not, a random
1556  * MAC address is generated
1557  */
1558 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1559 {
1560         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1561                 priv->hw->mac->get_umac_addr(priv->hw,
1562                                              priv->dev->dev_addr, 0);
1563                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1564                         eth_hw_addr_random(priv->dev);
1565                 netdev_info(priv->dev, "device MAC address %pM\n",
1566                             priv->dev->dev_addr);
1567         }
1568 }
1569
1570 /**
1571  * stmmac_init_dma_engine - DMA init.
1572  * @priv: driver private structure
1573  * Description:
1574  * It inits the DMA invoking the specific MAC/GMAC callback.
1575  * Some DMA parameters can be passed from the platform;
1576  * if they are not passed, a default is used for the MAC or GMAC.
1577  */
1578 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1579 {
1580         int atds = 0;
1581         int ret = 0;
1582
1583         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1584                 dev_err(priv->device, "Invalid DMA configuration\n");
1585                 return -EINVAL;
1586         }
1587
1588         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1589                 atds = 1;
1590
1591         ret = priv->hw->dma->reset(priv->ioaddr);
1592         if (ret) {
1593                 dev_err(priv->device, "Failed to reset the dma\n");
1594                 return ret;
1595         }
1596
1597         priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1598                             priv->dma_tx_phy, priv->dma_rx_phy, atds);
1599
1600         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1601                 priv->rx_tail_addr = priv->dma_rx_phy +
1602                             (DMA_RX_SIZE * sizeof(struct dma_desc));
1603                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1604                                                STMMAC_CHAN0);
1605
1606                 priv->tx_tail_addr = priv->dma_tx_phy +
1607                             (DMA_TX_SIZE * sizeof(struct dma_desc));
1608                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1609                                                STMMAC_CHAN0);
1610         }
1611
1612         if (priv->plat->axi && priv->hw->dma->axi)
1613                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1614
1615         return ret;
1616 }
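
/*
 * Worked example for the GMAC4 tail pointers programmed above, assuming
 * 16-byte descriptors (struct dma_desc carries four 32-bit words) and a
 * ring of DMA_RX_SIZE = 512 entries: rx_tail_addr = dma_rx_phy + 512 * 16,
 * i.e. 8 KiB past the ring base, just after the last descriptor.
 */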
1617
1618 /**
1619  * stmmac_tx_timer - mitigation sw timer for tx.
1620  * @data: data pointer
1621  * Description:
1622  * This is the timer handler to directly invoke the stmmac_tx_clean.
1623  */
1624 static void stmmac_tx_timer(unsigned long data)
1625 {
1626         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1627
1628         stmmac_tx_clean(priv);
1629 }
1630
1631 /**
1632  * stmmac_init_tx_coalesce - init tx mitigation options.
1633  * @priv: driver private structure
1634  * Description:
1635  * This inits the transmit coalesce parameters: i.e. timer rate,
1636  * timer handler and default threshold used for enabling the
1637  * interrupt on completion bit.
1638  */
1639 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1640 {
1641         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1642         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1643         init_timer(&priv->txtimer);
1644         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1645         priv->txtimer.data = (unsigned long)priv;
1646         priv->txtimer.function = stmmac_tx_timer;
1647         add_timer(&priv->txtimer);
1648 }
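
/*
 * Sketch of how these defaults interact with the transmit path: the IC
 * (interrupt-on-completion) bit is set on every tx_coal_frames-th frame
 * (see stmmac_xmit() below), so long bursts are cleaned from frame-count
 * interrupts, while short bursts that never reach the threshold are
 * reclaimed when this timer fires and invokes stmmac_tx_clean().
 */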
1649
1650 /**
1651  *  stmmac_set_tx_queue_weight - Set TX queue weight
1652  *  @priv: driver private structure
1653  *  Description: It is used to set the weight of each TX queue
1654  */
1655 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
1656 {
1657         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1658         u32 weight;
1659         u32 queue;
1660
1661         for (queue = 0; queue < tx_queues_count; queue++) {
1662                 weight = priv->plat->tx_queues_cfg[queue].weight;
1663                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
1664         }
1665 }
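
/*
 * Illustrative platform configuration feeding the loop above (hypothetical
 * values, field names as used in this file):
 *
 *	plat->tx_queues_to_use = 2;
 *	plat->tx_queues_cfg[0].weight = 0x10;
 *	plat->tx_queues_cfg[1].weight = 0x11;
 *
 * With this, set_mtl_tx_queue_weight() is called once per queue,
 * programming weight 0x10 for queue 0 and 0x11 for queue 1.
 */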
1666
1667 /**
1668  *  stmmac_mtl_configuration - Configure MTL
1669  *  @priv: driver private structure
1670  *  Description: It is used for configuring the MTL
1671  */
1672 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
1673 {
1674         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1675         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1676
1677         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
1678                 stmmac_set_tx_queue_weight(priv);
1679
1680         /* Configure MTL RX algorithms */
1681         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
1682                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
1683                                                 priv->plat->rx_sched_algorithm);
1684
1685         /* Configure MTL TX algorithms */
1686         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
1687                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
1688                                                 priv->plat->tx_sched_algorithm);
1689
1690         /* Enable MAC RX Queues */
1691         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
1692                 stmmac_mac_enable_rx_queues(priv);
1693 }
1694
1695 /**
1696  * stmmac_hw_setup - setup mac in a usable state.
1697  *  @dev : pointer to the device structure.
1698  *  @init_ptp: initialize PTP if set
1699  *  Description:
1700  *  this is the main function to setup the HW in a usable state: the DMA
1701  *  engine is reset, the core registers are configured (e.g. AXI, Checksum
1702  *  features, timers) and the DMA is made ready to receive and transmit.
1703  *  Return value:
1704  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1705  *  file on failure.
1706  */
1707 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1708 {
1709         struct stmmac_priv *priv = netdev_priv(dev);
1710         int ret;
1711
1712         /* DMA initialization and SW reset */
1713         ret = stmmac_init_dma_engine(priv);
1714         if (ret < 0) {
1715                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1716                            __func__);
1717                 return ret;
1718         }
1719
1720         /* Copy the MAC addr into the HW  */
1721         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1722
1723         /* PS and related bits will be programmed according to the speed */
1724         if (priv->hw->pcs) {
1725                 int speed = priv->plat->mac_port_sel_speed;
1726
1727                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1728                     (speed == SPEED_1000)) {
1729                         priv->hw->ps = speed;
1730                 } else {
1731                         dev_warn(priv->device, "invalid port speed\n");
1732                         priv->hw->ps = 0;
1733                 }
1734         }
1735
1736         /* Initialize the MAC Core */
1737         priv->hw->mac->core_init(priv->hw, dev->mtu);
1738
1739         /* Initialize MTL */
1740         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1741                 stmmac_mtl_configuration(priv);
1742
1743         ret = priv->hw->mac->rx_ipc(priv->hw);
1744         if (!ret) {
1745                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
1746                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1747                 priv->hw->rx_csum = 0;
1748         }
1749
1750         /* Enable the MAC Rx/Tx */
1751         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1752                 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1753         else
1754                 stmmac_set_mac(priv->ioaddr, true);
1755
1756         /* Set the HW DMA mode and the COE */
1757         stmmac_dma_operation_mode(priv);
1758
1759         stmmac_mmc_setup(priv);
1760
1761         if (init_ptp) {
1762                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
1763                 if (ret < 0)
1764                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
1765
1766                 ret = stmmac_init_ptp(priv);
1767                 if (ret == -EOPNOTSUPP)
1768                         netdev_warn(priv->dev, "PTP not supported by HW\n");
1769                 else if (ret)
1770                         netdev_warn(priv->dev, "PTP init failed\n");
1771         }
1772
1773 #ifdef CONFIG_DEBUG_FS
1774         ret = stmmac_init_fs(dev);
1775         if (ret < 0)
1776                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1777                             __func__);
1778 #endif
1779         /* Start the ball rolling... */
1780         netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
1781         priv->hw->dma->start_tx(priv->ioaddr);
1782         priv->hw->dma->start_rx(priv->ioaddr);
1783
1784         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1785
1786         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1787                 priv->rx_riwt = MAX_DMA_RIWT;
1788                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1789         }
1790
1791         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1792                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1793
1794         /*  set TX ring length */
1795         if (priv->hw->dma->set_tx_ring_len)
1796                 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1797                                                (DMA_TX_SIZE - 1));
1798         /*  set RX ring length */
1799         if (priv->hw->dma->set_rx_ring_len)
1800                 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1801                                                (DMA_RX_SIZE - 1));
1802         /* Enable TSO */
1803         if (priv->tso)
1804                 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1805
1806         return 0;
1807 }
1808
1809 static void stmmac_hw_teardown(struct net_device *dev)
1810 {
1811         struct stmmac_priv *priv = netdev_priv(dev);
1812
1813         clk_disable_unprepare(priv->plat->clk_ptp_ref);
1814 }
1815
1816 /**
1817  *  stmmac_open - open entry point of the driver
1818  *  @dev : pointer to the device structure.
1819  *  Description:
1820  *  This function is the open entry point of the driver.
1821  *  Return value:
1822  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1823  *  file on failure.
1824  */
1825 static int stmmac_open(struct net_device *dev)
1826 {
1827         struct stmmac_priv *priv = netdev_priv(dev);
1828         int ret;
1829
1830         stmmac_check_ether_addr(priv);
1831
1832         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1833             priv->hw->pcs != STMMAC_PCS_TBI &&
1834             priv->hw->pcs != STMMAC_PCS_RTBI) {
1835                 ret = stmmac_init_phy(dev);
1836                 if (ret) {
1837                         netdev_err(priv->dev,
1838                                    "%s: Cannot attach to PHY (error: %d)\n",
1839                                    __func__, ret);
1840                         return ret;
1841                 }
1842         }
1843
1844         /* Extra statistics */
1845         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1846         priv->xstats.threshold = tc;
1847
1848         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1849         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1850
1851         ret = alloc_dma_desc_resources(priv);
1852         if (ret < 0) {
1853                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1854                            __func__);
1855                 goto dma_desc_error;
1856         }
1857
1858         ret = init_dma_desc_rings(dev, GFP_KERNEL);
1859         if (ret < 0) {
1860                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1861                            __func__);
1862                 goto init_error;
1863         }
1864
1865         ret = stmmac_hw_setup(dev, true);
1866         if (ret < 0) {
1867                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
1868                 goto init_error;
1869         }
1870
1871         stmmac_init_tx_coalesce(priv);
1872
1873         if (dev->phydev)
1874                 phy_start(dev->phydev);
1875
1876         /* Request the IRQ lines */
1877         ret = request_irq(dev->irq, stmmac_interrupt,
1878                           IRQF_SHARED, dev->name, dev);
1879         if (unlikely(ret < 0)) {
1880                 netdev_err(priv->dev,
1881                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1882                            __func__, dev->irq, ret);
1883                 goto irq_error;
1884         }
1885
1886         /* Request the Wake IRQ in case another line is used for WoL */
1887         if (priv->wol_irq != dev->irq) {
1888                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1889                                   IRQF_SHARED, dev->name, dev);
1890                 if (unlikely(ret < 0)) {
1891                         netdev_err(priv->dev,
1892                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1893                                    __func__, priv->wol_irq, ret);
1894                         goto wolirq_error;
1895                 }
1896         }
1897
1898         /* Request the LPI IRQ in case a separate line is used for it */
1899         if (priv->lpi_irq > 0) {
1900                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1901                                   dev->name, dev);
1902                 if (unlikely(ret < 0)) {
1903                         netdev_err(priv->dev,
1904                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1905                                    __func__, priv->lpi_irq, ret);
1906                         goto lpiirq_error;
1907                 }
1908         }
1909
1910         napi_enable(&priv->napi);
1911         netif_start_queue(dev);
1912
1913         return 0;
1914
1915 lpiirq_error:
1916         if (priv->wol_irq != dev->irq)
1917                 free_irq(priv->wol_irq, dev);
1918 wolirq_error:
1919         free_irq(dev->irq, dev);
1920 irq_error:
1921         if (dev->phydev)
1922                 phy_stop(dev->phydev);
1923
1924         del_timer_sync(&priv->txtimer);
1925         stmmac_hw_teardown(dev);
1926 init_error:
1927         free_dma_desc_resources(priv);
1928 dma_desc_error:
1929         if (dev->phydev)
1930                 phy_disconnect(dev->phydev);
1931
1932         return ret;
1933 }
1934
1935 /**
1936  *  stmmac_release - close entry point of the driver
1937  *  @dev : device pointer.
1938  *  Description:
1939  *  This is the stop entry point of the driver.
1940  */
1941 static int stmmac_release(struct net_device *dev)
1942 {
1943         struct stmmac_priv *priv = netdev_priv(dev);
1944
1945         if (priv->eee_enabled)
1946                 del_timer_sync(&priv->eee_ctrl_timer);
1947
1948         /* Stop and disconnect the PHY */
1949         if (dev->phydev) {
1950                 phy_stop(dev->phydev);
1951                 phy_disconnect(dev->phydev);
1952         }
1953
1954         netif_stop_queue(dev);
1955
1956         napi_disable(&priv->napi);
1957
1958         del_timer_sync(&priv->txtimer);
1959
1960         /* Free the IRQ lines */
1961         free_irq(dev->irq, dev);
1962         if (priv->wol_irq != dev->irq)
1963                 free_irq(priv->wol_irq, dev);
1964         if (priv->lpi_irq > 0)
1965                 free_irq(priv->lpi_irq, dev);
1966
1967         /* Stop TX/RX DMA and clear the descriptors */
1968         priv->hw->dma->stop_tx(priv->ioaddr);
1969         priv->hw->dma->stop_rx(priv->ioaddr);
1970
1971         /* Release and free the Rx/Tx resources */
1972         free_dma_desc_resources(priv);
1973
1974         /* Disable the MAC Rx/Tx */
1975         stmmac_set_mac(priv->ioaddr, false);
1976
1977         netif_carrier_off(dev);
1978
1979 #ifdef CONFIG_DEBUG_FS
1980         stmmac_exit_fs(dev);
1981 #endif
1982
1983         stmmac_release_ptp(priv);
1984
1985         return 0;
1986 }
1987
1988 /**
1989  *  stmmac_tso_allocator - program a TSO payload into the descriptors
1990  *  @priv: driver private structure
1991  *  @des: buffer start address
1992  *  @total_len: total length to fill in descriptors
1993  *  @last_segment: condition for the last descriptor
1994  *  Description:
1995  *  This function fills the current descriptor and requests new descriptors
1996  *  according to the buffer length to fill.
1997  */
1998 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1999                                  int total_len, bool last_segment)
2000 {
2001         struct dma_desc *desc;
2002         int tmp_len;
2003         u32 buff_size;
2004
2005         tmp_len = total_len;
2006
2007         while (tmp_len > 0) {
2008                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2009                 desc = priv->dma_tx + priv->cur_tx;
2010
2011                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2012                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2013                             TSO_MAX_BUFF_SIZE : tmp_len;
2014
2015                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2016                         0, 1,
2017                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2018                         0, 0);
2019
2020                 tmp_len -= TSO_MAX_BUFF_SIZE;
2021         }
2022 }
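
/*
 * Worked example for the loop above: with TSO_MAX_BUFF_SIZE = 16383
 * (SZ_16K - 1) and total_len = 40000, three descriptors are consumed,
 * carrying 16383, 16383 and 7234 bytes; only the last chunk can have the
 * last-segment flag set, since buff_size < TSO_MAX_BUFF_SIZE holds only
 * there.
 */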
2023
2024 /**
2025  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2026  *  @skb : the socket buffer
2027  *  @dev : device pointer
2028  *  Description: this is the transmit function that is called on TSO frames
2029  *  (support available on GMAC4 and newer chips).
2030  *  The diagram below shows the ring programming in case of TSO frames:
2031  *
2032  *  First Descriptor
2033  *   --------
2034  *   | DES0 |---> buffer1 = L2/L3/L4 header
2035  *   | DES1 |---> TCP Payload (can continue on next descr...)
2036  *   | DES2 |---> buffer 1 and 2 len
2037  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2038  *   --------
2039  *      |
2040  *     ...
2041  *      |
2042  *   --------
2043  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2044  *   | DES1 | --|
2045  *   | DES2 | --> buffer 1 and 2 len
2046  *   | DES3 |
2047  *   --------
2048  *
2049  * mss is fixed while TSO is enabled, so the TDES3 ctx field is programmed only when it changes.
2050  */
2051 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2052 {
2053         u32 pay_len, mss;
2054         int tmp_pay_len = 0;
2055         struct stmmac_priv *priv = netdev_priv(dev);
2056         int nfrags = skb_shinfo(skb)->nr_frags;
2057         unsigned int first_entry, des;
2058         struct dma_desc *desc, *first, *mss_desc = NULL;
2059         u8 proto_hdr_len;
2060         int i;
2061
2062         /* Compute header lengths */
2063         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2064
2065         /* Desc availability based on threshold should be safe enough */
2066         if (unlikely(stmmac_tx_avail(priv) <
2067                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2068                 if (!netif_queue_stopped(dev)) {
2069                         netif_stop_queue(dev);
2070                         /* This is a hard error, log it. */
2071                         netdev_err(priv->dev,
2072                                    "%s: Tx Ring full when queue awake\n",
2073                                    __func__);
2074                 }
2075                 return NETDEV_TX_BUSY;
2076         }
2077
2078         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2079
2080         mss = skb_shinfo(skb)->gso_size;
2081
2082         /* set new MSS value if needed */
2083         if (mss != priv->mss) {
2084                 mss_desc = priv->dma_tx + priv->cur_tx;
2085                 priv->hw->desc->set_mss(mss_desc, mss);
2086                 priv->mss = mss;
2087                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2088         }
2089
2090         if (netif_msg_tx_queued(priv)) {
2091                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2092                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2093                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2094                         skb->data_len);
2095         }
2096
2097         first_entry = priv->cur_tx;
2098
2099         desc = priv->dma_tx + first_entry;
2100         first = desc;
2101
2102         /* first descriptor: fill Headers on Buf1 */
2103         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2104                              DMA_TO_DEVICE);
2105         if (dma_mapping_error(priv->device, des))
2106                 goto dma_map_err;
2107
2108         priv->tx_skbuff_dma[first_entry].buf = des;
2109         priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2110         priv->tx_skbuff[first_entry] = skb;
2111
2112         first->des0 = cpu_to_le32(des);
2113
2114         /* Fill start of payload in buff2 of first descriptor */
2115         if (pay_len)
2116                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2117
2118         /* If needed take extra descriptors to fill the remaining payload */
2119         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2120
2121         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2122
2123         /* Prepare fragments */
2124         for (i = 0; i < nfrags; i++) {
2125                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2126
2127                 des = skb_frag_dma_map(priv->device, frag, 0,
2128                                        skb_frag_size(frag),
2129                                        DMA_TO_DEVICE);
2130                 if (dma_mapping_error(priv->device, des))
2131                         goto dma_map_err;
2132
2133                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2134                                      (i == nfrags - 1));
2135
2136                 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2137                 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2138                 priv->tx_skbuff[priv->cur_tx] = NULL;
2139                 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2140         }
2141
2142         priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2143
2144         priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2145
2146         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2147                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2148                           __func__);
2149                 netif_stop_queue(dev);
2150         }
2151
2152         dev->stats.tx_bytes += skb->len;
2153         priv->xstats.tx_tso_frames++;
2154         priv->xstats.tx_tso_nfrags += nfrags;
2155
2156         /* Manage tx mitigation */
2157         priv->tx_count_frames += nfrags + 1;
2158         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2159                 mod_timer(&priv->txtimer,
2160                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2161         } else {
2162                 priv->tx_count_frames = 0;
2163                 priv->hw->desc->set_tx_ic(desc);
2164                 priv->xstats.tx_set_ic_bit++;
2165         }
2166
2167         if (!priv->hwts_tx_en)
2168                 skb_tx_timestamp(skb);
2169
2170         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2171                      priv->hwts_tx_en)) {
2172                 /* declare that device is doing timestamping */
2173                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2174                 priv->hw->desc->enable_tx_timestamp(first);
2175         }
2176
2177         /* Complete the first descriptor before granting the DMA */
2178         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2179                         proto_hdr_len,
2180                         pay_len,
2181                         1, priv->tx_skbuff_dma[first_entry].last_segment,
2182                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2183
2184         /* If context desc is used to change MSS */
2185         if (mss_desc)
2186                 priv->hw->desc->set_tx_owner(mss_desc);
2187
2188         /* The own bit must be the latest setting done when preparing the
2189          * descriptor, and then a barrier is needed to make sure that
2190          * all is coherent before granting the DMA engine.
2191          */
2192         dma_wmb();
2193
2194         if (netif_msg_pktdata(priv)) {
2195                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2196                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2197                         priv->cur_tx, first, nfrags);
2198
2199                 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2200                                              0);
2201
2202                 pr_info(">>> frame to be transmitted: ");
2203                 print_pkt(skb->data, skb_headlen(skb));
2204         }
2205
2206         netdev_sent_queue(dev, skb->len);
2207
2208         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2209                                        STMMAC_CHAN0);
2210
2211         return NETDEV_TX_OK;
2212
2213 dma_map_err:
2214         dev_err(priv->device, "Tx dma map failed\n");
2215         dev_kfree_skb(skb);
2216         priv->dev->stats.tx_dropped++;
2217         return NETDEV_TX_OK;
2218 }
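
/*
 * Example of the first-descriptor split performed above (illustrative
 * numbers): with proto_hdr_len = 66 and skb_headlen() = 1066, des0 points
 * at the 66 header bytes and des1 at the 1000 payload bytes that follow;
 * linear payload beyond TSO_MAX_BUFF_SIZE and every page fragment are then
 * handed to stmmac_tso_allocator().
 */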
2219
2220 /**
2221  *  stmmac_xmit - Tx entry point of the driver
2222  *  @skb : the socket buffer
2223  *  @dev : device pointer
2224  *  Description : this is the tx entry point of the driver.
2225  *  It programs the chain or the ring and supports oversized frames
2226  *  and SG feature.
2227  */
2228 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2229 {
2230         struct stmmac_priv *priv = netdev_priv(dev);
2231         unsigned int nopaged_len = skb_headlen(skb);
2232         int i, csum_insertion = 0, is_jumbo = 0;
2233         int nfrags = skb_shinfo(skb)->nr_frags;
2234         unsigned int entry, first_entry;
2235         struct dma_desc *desc, *first;
2236         unsigned int enh_desc;
2237         unsigned int des;
2238
2239         /* Manage oversized TCP frames for GMAC4 device */
2240         if (skb_is_gso(skb) && priv->tso) {
2241                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2242                         return stmmac_tso_xmit(skb, dev);
2243         }
2244
2245         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2246                 if (!netif_queue_stopped(dev)) {
2247                         netif_stop_queue(dev);
2248                         /* This is a hard error, log it. */
2249                         netdev_err(priv->dev,
2250                                    "%s: Tx Ring full when queue awake\n",
2251                                    __func__);
2252                 }
2253                 return NETDEV_TX_BUSY;
2254         }
2255
2256         if (priv->tx_path_in_lpi_mode)
2257                 stmmac_disable_eee_mode(priv);
2258
2259         entry = priv->cur_tx;
2260         first_entry = entry;
2261
2262         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2263
2264         if (likely(priv->extend_desc))
2265                 desc = (struct dma_desc *)(priv->dma_etx + entry);
2266         else
2267                 desc = priv->dma_tx + entry;
2268
2269         first = desc;
2270
2271         priv->tx_skbuff[first_entry] = skb;
2272
2273         enh_desc = priv->plat->enh_desc;
2274         /* To program the descriptors according to the size of the frame */
2275         if (enh_desc)
2276                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2277
2278         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2279                                          DWMAC_CORE_4_00)) {
2280                 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2281                 if (unlikely(entry < 0))
2282                         goto dma_map_err;
2283         }
2284
2285         for (i = 0; i < nfrags; i++) {
2286                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2287                 int len = skb_frag_size(frag);
2288                 bool last_segment = (i == (nfrags - 1));
2289
2290                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2291
2292                 if (likely(priv->extend_desc))
2293                         desc = (struct dma_desc *)(priv->dma_etx + entry);
2294                 else
2295                         desc = priv->dma_tx + entry;
2296
2297                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2298                                        DMA_TO_DEVICE);
2299                 if (dma_mapping_error(priv->device, des))
2300                         goto dma_map_err; /* should reuse desc w/o issues */
2301
2302                 priv->tx_skbuff[entry] = NULL;
2303
2304                 priv->tx_skbuff_dma[entry].buf = des;
2305                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2306                         desc->des0 = cpu_to_le32(des);
2307                 else
2308                         desc->des2 = cpu_to_le32(des);
2309
2310                 priv->tx_skbuff_dma[entry].map_as_page = true;
2311                 priv->tx_skbuff_dma[entry].len = len;
2312                 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2313
2314                 /* Prepare the descriptor and set the own bit too */
2315                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2316                                                 priv->mode, 1, last_segment);
2317         }
2318
2319         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2320
2321         priv->cur_tx = entry;
2322
2323         if (netif_msg_pktdata(priv)) {
2324                 void *tx_head;
2325
2326                 netdev_dbg(priv->dev,
2327                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2328                            __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2329                            entry, first, nfrags);
2330
2331                 if (priv->extend_desc)
2332                         tx_head = (void *)priv->dma_etx;
2333                 else
2334                         tx_head = (void *)priv->dma_tx;
2335
2336                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2337
2338                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2339                 print_pkt(skb->data, skb->len);
2340         }
2341
2342         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2343                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2344                           __func__);
2345                 netif_stop_queue(dev);
2346         }
2347
2348         dev->stats.tx_bytes += skb->len;
2349
2350         /* According to the coalesce parameter the IC bit for the latest
2351          * segment is reset and the timer re-started to clean the tx status.
2352          * This approach takes care of the fragments: desc is the first
2353          * element in case of no SG.
2354          */
2355         priv->tx_count_frames += nfrags + 1;
2356         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2357                 mod_timer(&priv->txtimer,
2358                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2359         } else {
2360                 priv->tx_count_frames = 0;
2361                 priv->hw->desc->set_tx_ic(desc);
2362                 priv->xstats.tx_set_ic_bit++;
2363         }
2364
2365         if (!priv->hwts_tx_en)
2366                 skb_tx_timestamp(skb);
2367
2368         /* Ready to fill the first descriptor and set the OWN bit w/o any
2369          * problems because all the descriptors are actually ready to be
2370          * passed to the DMA engine.
2371          */
2372         if (likely(!is_jumbo)) {
2373                 bool last_segment = (nfrags == 0);
2374
2375                 des = dma_map_single(priv->device, skb->data,
2376                                      nopaged_len, DMA_TO_DEVICE);
2377                 if (dma_mapping_error(priv->device, des))
2378                         goto dma_map_err;
2379
2380                 priv->tx_skbuff_dma[first_entry].buf = des;
2381                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2382                         first->des0 = cpu_to_le32(des);
2383                 else
2384                         first->des2 = cpu_to_le32(des);
2385
2386                 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2387                 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2388
2389                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2390                              priv->hwts_tx_en)) {
2391                         /* declare that device is doing timestamping */
2392                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2393                         priv->hw->desc->enable_tx_timestamp(first);
2394                 }
2395
2396                 /* Prepare the first descriptor setting the OWN bit too */
2397                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2398                                                 csum_insertion, priv->mode, 1,
2399                                                 last_segment);
2400
2401                 /* The own bit must be the latest setting done when preparing the
2402                  * descriptor, and then a barrier is needed to make sure that
2403                  * all is coherent before granting the DMA engine.
2404                  */
2405                 dma_wmb();
2406         }
2407
2408         netdev_sent_queue(dev, skb->len);
2409
2410         if (priv->synopsys_id < DWMAC_CORE_4_00)
2411                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2412         else
2413                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2414                                                STMMAC_CHAN0);
2415
2416         return NETDEV_TX_OK;
2417
2418 dma_map_err:
2419         netdev_err(priv->dev, "Tx DMA map failed\n");
2420         dev_kfree_skb(skb);
2421         priv->dev->stats.tx_dropped++;
2422         return NETDEV_TX_OK;
2423 }
2424
2425 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2426 {
2427         struct ethhdr *ehdr;
2428         u16 vlanid;
2429
2430         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2431             NETIF_F_HW_VLAN_CTAG_RX &&
2432             !__vlan_get_tag(skb, &vlanid)) {
2433                 /* pop the vlan tag */
2434                 ehdr = (struct ethhdr *)skb->data;
2435                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2436                 skb_pull(skb, VLAN_HLEN);
2437                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2438         }
2439 }
2440
2442 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2443 {
2444         if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2445                 return 0;
2446
2447         return 1;
2448 }
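
/*
 * Illustrative effect in stmmac_rx() below (non-GMAC4 cores only): with
 * the default rx_copybreak of STMMAC_RX_COPYBREAK bytes, a 128-byte frame
 * is copied into a freshly allocated skb and the preallocated ring buffer
 * stays in place, while a 1500-byte frame is passed up zero-copy and its
 * entry is refilled later by stmmac_rx_refill(). Once rx_zeroc_thresh
 * reaches STMMAC_RX_THRESH, this helper forces the copy path for every
 * size until successful refills bring it back down.
 */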
2449
2450 /**
2451  * stmmac_rx_refill - refill used skb preallocated buffers
2452  * @priv: driver private structure
2453  * Description : this reallocates the skbs used by the zero-copy
2454  * reception process.
2455  */
2456 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2457 {
2458         int bfsize = priv->dma_buf_sz;
2459         unsigned int entry = priv->dirty_rx;
2460         int dirty = stmmac_rx_dirty(priv);
2461
2462         while (dirty-- > 0) {
2463                 struct dma_desc *p;
2464
2465                 if (priv->extend_desc)
2466                         p = (struct dma_desc *)(priv->dma_erx + entry);
2467                 else
2468                         p = priv->dma_rx + entry;
2469
2470                 if (likely(priv->rx_skbuff[entry] == NULL)) {
2471                         struct sk_buff *skb;
2472
2473                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2474                         if (unlikely(!skb)) {
2475                                 /* so for a while no zero-copy! */
2476                                 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2477                                 if (unlikely(net_ratelimit()))
2478                                         dev_err(priv->device,
2479                                                 "fail to alloc skb entry %d\n",
2480                                                 entry);
2481                                 break;
2482                         }
2483
2484                         priv->rx_skbuff[entry] = skb;
2485                         priv->rx_skbuff_dma[entry] =
2486                             dma_map_single(priv->device, skb->data, bfsize,
2487                                            DMA_FROM_DEVICE);
2488                         if (dma_mapping_error(priv->device,
2489                                               priv->rx_skbuff_dma[entry])) {
2490                                 netdev_err(priv->dev, "Rx DMA map failed\n");
2491                                 dev_kfree_skb(skb);
2492                                 break;
2493                         }
2494
2495                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2496                                 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2497                                 p->des1 = 0;
2498                         } else {
2499                                 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2500                         }
2501                         if (priv->hw->mode->refill_desc3)
2502                                 priv->hw->mode->refill_desc3(priv, p);
2503
2504                         if (priv->rx_zeroc_thresh > 0)
2505                                 priv->rx_zeroc_thresh--;
2506
2507                         netif_dbg(priv, rx_status, priv->dev,
2508                                   "refill entry #%d\n", entry);
2509                 }
2510                 dma_wmb();
2511
2512                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2513                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2514                 else
2515                         priv->hw->desc->set_rx_owner(p);
2516
2517                 dma_wmb();
2518
2519                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2520         }
2521         priv->dirty_rx = entry;
2522 }
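
/*
 * Example (illustrative): if cur_rx has advanced to 40 while dirty_rx is
 * still 32, stmmac_rx_dirty() reports 8 and the loop above re-arms entries
 * 32..39, handing each one back to the DMA by setting its owner bit last,
 * behind the dma_wmb() barriers.
 */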
2523
2524 /**
2525  * stmmac_rx - manage the receive process
2526  * @priv: driver private structure
2527  * @limit: napi budget.
2528  * Description :  this is the function called by the napi poll method.
2529  * It gets all the frames inside the ring.
2530  */
2531 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2532 {
2533         unsigned int entry = priv->cur_rx;
2534         unsigned int next_entry;
2535         unsigned int count = 0;
2536         int coe = priv->hw->rx_csum;
2537
2538         if (netif_msg_rx_status(priv)) {
2539                 void *rx_head;
2540
2541                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2542                 if (priv->extend_desc)
2543                         rx_head = (void *)priv->dma_erx;
2544                 else
2545                         rx_head = (void *)priv->dma_rx;
2546
2547                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2548         }
2549         while (count < limit) {
2550                 int status;
2551                 struct dma_desc *p;
2552                 struct dma_desc *np;
2553
2554                 if (priv->extend_desc)
2555                         p = (struct dma_desc *)(priv->dma_erx + entry);
2556                 else
2557                         p = priv->dma_rx + entry;
2558
2559                 /* read the status of the incoming frame */
2560                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2561                                                    &priv->xstats, p);
2562                 /* check if managed by the DMA otherwise go ahead */
2563                 if (unlikely(status & dma_own))
2564                         break;
2565
2566                 count++;
2567
2568                 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2569                 next_entry = priv->cur_rx;
2570
2571                 if (priv->extend_desc)
2572                         np = (struct dma_desc *)(priv->dma_erx + next_entry);
2573                 else
2574                         np = priv->dma_rx + next_entry;
2575
2576                 prefetch(np);
2577
2578                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2579                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2580                                                            &priv->xstats,
2581                                                            priv->dma_erx +
2582                                                            entry);
2583                 if (unlikely(status == discard_frame)) {
2584                         priv->dev->stats.rx_errors++;
2585                         if (priv->hwts_rx_en && !priv->extend_desc) {
2586                                 /* DESC2 & DESC3 will be overwritten by device
2587                                  * with timestamp value, hence reinitialize
2588                                  * them in stmmac_rx_refill() function so that
2589                                  * device can reuse it.
2590                                  */
2591                                 priv->rx_skbuff[entry] = NULL;
2592                                 dma_unmap_single(priv->device,
2593                                                  priv->rx_skbuff_dma[entry],
2594                                                  priv->dma_buf_sz,
2595                                                  DMA_FROM_DEVICE);
2596                         }
2597                 } else {
2598                         struct sk_buff *skb;
2599                         int frame_len;
2600                         unsigned int des;
2601
2602                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2603                                 des = le32_to_cpu(p->des0);
2604                         else
2605                                 des = le32_to_cpu(p->des2);
2606
2607                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2608
2609                         /*  If frame length is greater than skb buffer size
2610                          *  (preallocated during init) then the packet is
2611                          *  ignored
2612                          */
2613                         if (frame_len > priv->dma_buf_sz) {
2614                                 netdev_err(priv->dev,
2615                                            "len %d larger than size (%d)\n",
2616                                            frame_len, priv->dma_buf_sz);
2617                                 priv->dev->stats.rx_length_errors++;
2618                                 break;
2619                         }
2620
2621                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2622                          * Type frames (LLC/LLC-SNAP)
2623                          */
2624                         if (unlikely(status != llc_snap))
2625                                 frame_len -= ETH_FCS_LEN;
2626
2627                         if (netif_msg_rx_status(priv)) {
2628                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2629                                            p, entry, des);
2630                                 if (frame_len > ETH_FRAME_LEN)
2631                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2632                                                    frame_len, status);
2633                         }
2634
2635                         /* On GMAC4 the zero-copy path is used for all
2636                          * frame sizes because its used descriptors must
2637                          * always be refilled.
2638                          */
2639                         if (unlikely(!priv->plat->has_gmac4 &&
2640                                      ((frame_len < priv->rx_copybreak) ||
2641                                      stmmac_rx_threshold_count(priv)))) {
2642                                 skb = netdev_alloc_skb_ip_align(priv->dev,
2643                                                                 frame_len);
2644                                 if (unlikely(!skb)) {
2645                                         if (net_ratelimit())
2646                                                 dev_warn(priv->device,
2647                                                          "packet dropped\n");
2648                                         priv->dev->stats.rx_dropped++;
2649                                         break;
2650                                 }
2651
2652                                 dma_sync_single_for_cpu(priv->device,
2653                                                         priv->rx_skbuff_dma
2654                                                         [entry], frame_len,
2655                                                         DMA_FROM_DEVICE);
2656                                 skb_copy_to_linear_data(skb,
2657                                                         priv->
2658                                                         rx_skbuff[entry]->data,
2659                                                         frame_len);
2660
2661                                 skb_put(skb, frame_len);
2662                                 dma_sync_single_for_device(priv->device,
2663                                                            priv->rx_skbuff_dma
2664                                                            [entry], frame_len,
2665                                                            DMA_FROM_DEVICE);
2666                         } else {
2667                                 skb = priv->rx_skbuff[entry];
2668                                 if (unlikely(!skb)) {
2669                                         netdev_err(priv->dev,
2670                                                    "%s: Inconsistent Rx chain\n",
2671                                                    priv->dev->name);
2672                                         priv->dev->stats.rx_dropped++;
2673                                         break;
2674                                 }
2675                                 prefetch(skb->data - NET_IP_ALIGN);
2676                                 priv->rx_skbuff[entry] = NULL;
2677                                 priv->rx_zeroc_thresh++;
2678
2679                                 skb_put(skb, frame_len);
2680                                 dma_unmap_single(priv->device,
2681                                                  priv->rx_skbuff_dma[entry],
2682                                                  priv->dma_buf_sz,
2683                                                  DMA_FROM_DEVICE);
2684                         }
2685
2686                         if (netif_msg_pktdata(priv)) {
2687                                 netdev_dbg(priv->dev, "frame received (%d bytes)",
2688                                            frame_len);
2689                                 print_pkt(skb->data, frame_len);
2690                         }
2691
2692                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
2693
2694                         stmmac_rx_vlan(priv->dev, skb);
2695
2696                         skb->protocol = eth_type_trans(skb, priv->dev);
2697
2698                         if (unlikely(!coe))
2699                                 skb_checksum_none_assert(skb);
2700                         else
2701                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2702
2703                         napi_gro_receive(&priv->napi, skb);
2704
2705                         priv->dev->stats.rx_packets++;
2706                         priv->dev->stats.rx_bytes += frame_len;
2707                 }
2708                 entry = next_entry;
2709         }
2710
2711         stmmac_rx_refill(priv);
2712
2713         priv->xstats.rx_pkt_n += count;
2714
2715         return count;
2716 }
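
/* Note: the copybreak path above trades one memcpy for leaving the original
 * DMA buffer in place, which pays off for small frames; priv->rx_copybreak
 * defaults to STMMAC_RX_COPYBREAK (256 bytes), and the zero-copy branch is
 * always taken on GMAC4 so that the used descriptors are refilled.
 */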
2717
2718 /**
2719  *  stmmac_poll - stmmac poll method (NAPI)
2720  *  @napi : pointer to the napi structure.
2721  *  @budget : maximum number of packets that the current CPU can receive from
2722  *            all interfaces.
2723  *  Description :
2724  *  Process the incoming frames and clean the completed TX resources.
2725  */
2726 static int stmmac_poll(struct napi_struct *napi, int budget)
2727 {
2728         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2729         int work_done = 0;
2730
2731         priv->xstats.napi_poll++;
2732         stmmac_tx_clean(priv);
2733
2734         work_done = stmmac_rx(priv, budget);
2735         if (work_done < budget) {
2736                 napi_complete_done(napi, work_done);
2737                 stmmac_enable_dma_irq(priv);
2738         }
2739         return work_done;
2740 }
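
/* Note on the NAPI contract implemented above: while stmmac_rx() uses up the
 * whole budget, the poll method stays scheduled and the DMA interrupt stays
 * masked; only when work_done < budget does napi_complete_done() allow
 * stmmac_enable_dma_irq() to re-arm the interrupt.
 */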
2741
2742 /**
2743  *  stmmac_tx_timeout
2744  *  @dev : Pointer to net device structure
2745  *  Description: this function is called when a packet transmission fails to
2746  *   complete within a reasonable time. The driver will mark the error in the
2747  *   netdev structure and arrange for the device to be reset to a sane state
2748  *   in order to transmit a new packet.
2749  */
2750 static void stmmac_tx_timeout(struct net_device *dev)
2751 {
2752         struct stmmac_priv *priv = netdev_priv(dev);
2753
2754         /* Clear Tx resources and restart transmitting again */
2755         stmmac_tx_err(priv);
2756 }
2757
2758 /**
2759  *  stmmac_set_rx_mode - entry point for multicast addressing
2760  *  @dev : pointer to the device structure
2761  *  Description:
2762  *  This function is a driver entry point which gets called by the kernel
2763  *  whenever multicast addresses must be enabled/disabled.
2764  *  Return value:
2765  *  void.
2766  */
2767 static void stmmac_set_rx_mode(struct net_device *dev)
2768 {
2769         struct stmmac_priv *priv = netdev_priv(dev);
2770
2771         priv->hw->mac->set_filter(priv->hw, dev);
2772 }
2773
2774 /**
2775  *  stmmac_change_mtu - entry point to change MTU size for the device.
2776  *  @dev : device pointer.
2777  *  @new_mtu : the new MTU size for the device.
2778  * Description: the Maximum Transmission Unit (MTU) is used by the network layer
2779  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2780  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2781  *  Return value:
2782  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2783  *  file on failure.
2784  */
2785 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2786 {
2787         struct stmmac_priv *priv = netdev_priv(dev);
2788
2789         if (netif_running(dev)) {
2790                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
2791                 return -EBUSY;
2792         }
2793
2794         dev->mtu = new_mtu;
2795
2796         netdev_update_features(dev);
2797
2798         return 0;
2799 }
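
/* Example usage (hypothetical interface name): because the handler above
 * returns -EBUSY on a running interface, the MTU has to be changed with the
 * link down, e.g. with iproute2:
 *
 *     ip link set dev eth0 down
 *     ip link set dev eth0 mtu 4000
 *     ip link set dev eth0 up
 *
 * The accepted value must fall in the [min_mtu, max_mtu] window configured
 * in stmmac_dvr_probe() below.
 */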
2800
2801 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2802                                              netdev_features_t features)
2803 {
2804         struct stmmac_priv *priv = netdev_priv(dev);
2805
2806         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2807                 features &= ~NETIF_F_RXCSUM;
2808
2809         if (!priv->plat->tx_coe)
2810                 features &= ~NETIF_F_CSUM_MASK;
2811
2812         /* Some GMAC devices have buggy Jumbo frame support that
2813          * needs to have the Tx COE disabled for oversized frames
2814          * (due to limited buffer sizes). In this case we disable
2815          * the TX csum insertion in the TDES and don't use SF.
2816          */
2817         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2818                 features &= ~NETIF_F_CSUM_MASK;
2819
2820         /* Disable TSO if requested via ethtool */
2821         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2822                 if (features & NETIF_F_TSO)
2823                         priv->tso = true;
2824                 else
2825                         priv->tso = false;
2826         }
2827
2828         return features;
2829 }
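
/* Example (hypothetical interface name): this hook is what a feature toggle
 * such as
 *
 *     ethtool -K eth0 tso on
 *
 * goes through; when the platform enables TSO (tso_en) and the core reports
 * it (dma_cap.tsoen), priv->tso follows the NETIF_F_TSO bit requested by
 * the user.
 */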
2830
2831 static int stmmac_set_features(struct net_device *netdev,
2832                                netdev_features_t features)
2833 {
2834         struct stmmac_priv *priv = netdev_priv(netdev);
2835
2836         /* Keep the COE Type if RX checksum offload is supported */
2837         if (features & NETIF_F_RXCSUM)
2838                 priv->hw->rx_csum = priv->plat->rx_coe;
2839         else
2840                 priv->hw->rx_csum = 0;
2841         /* No check is needed because rx_coe has been validated before
2842          * and will be fixed up if there is an issue.
2843          */
2844         priv->hw->mac->rx_ipc(priv->hw);
2845
2846         return 0;
2847 }
2848
2849 /**
2850  *  stmmac_interrupt - main ISR
2851  *  @irq: interrupt number.
2852  *  @dev_id: to pass the net device pointer.
2853  *  Description: this is the main driver interrupt service routine.
2854  *  It can call:
2855  *  o DMA service routine (to manage incoming frame reception and transmission
2856  *    status)
2857  *  o Core interrupts to manage: remote wake-up, management counter, LPI
2858  *    interrupts.
2859  */
2860 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2861 {
2862         struct net_device *dev = (struct net_device *)dev_id;
2863         struct stmmac_priv *priv;
2864
2865         if (unlikely(!dev)) {
2866                 pr_err("%s: invalid dev pointer\n", __func__);
2867                 return IRQ_NONE;
2868         }
2869         priv = netdev_priv(dev);
2870         if (priv->irq_wake)
2871                 pm_wakeup_event(priv->device, 0);
2872
2873         /* To handle GMAC own interrupts */
2874         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2875                 int status = priv->hw->mac->host_irq_status(priv->hw,
2876                                                             &priv->xstats);
2877                 if (unlikely(status)) {
2878                         /* For LPI we need to save the tx status */
2879                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2880                                 priv->tx_path_in_lpi_mode = true;
2881                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2882                                 priv->tx_path_in_lpi_mode = false;
2883                         if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2884                                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2885                                                         priv->rx_tail_addr,
2886                                                         STMMAC_CHAN0);
2887                 }
2888
2889                 /* PCS link status */
2890                 if (priv->hw->pcs) {
2891                         if (priv->xstats.pcs_link)
2892                                 netif_carrier_on(dev);
2893                         else
2894                                 netif_carrier_off(dev);
2895                 }
2896         }
2897
2898         /* To handle DMA interrupts */
2899         stmmac_dma_interrupt(priv);
2900
2901         return IRQ_HANDLED;
2902 }
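
/* Note: this handler is registered from stmmac_open() with something like
 * request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev),
 * which is why dev_id carries the net_device pointer validated above.
 */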
2903
2904 #ifdef CONFIG_NET_POLL_CONTROLLER
2905 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2906  * to allow network I/O with interrupts disabled.
2907  */
2908 static void stmmac_poll_controller(struct net_device *dev)
2909 {
2910         disable_irq(dev->irq);
2911         stmmac_interrupt(dev->irq, dev);
2912         enable_irq(dev->irq);
2913 }
2914 #endif
2915
2916 /**
2917  *  stmmac_ioctl - Entry point for the Ioctl
2918  *  @dev: Device pointer.
2919  *  @rq: An IOCTL-specific structure that can contain a pointer to
2920  *  a proprietary structure used to pass information to the driver.
2921  *  @cmd: IOCTL command
2922  *  Description:
2923  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2924  */
2925 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2926 {
2927         int ret = -EOPNOTSUPP;
2928
2929         if (!netif_running(dev))
2930                 return -EINVAL;
2931
2932         switch (cmd) {
2933         case SIOCGMIIPHY:
2934         case SIOCGMIIREG:
2935         case SIOCSMIIREG:
2936                 if (!dev->phydev)
2937                         return -EINVAL;
2938                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
2939                 break;
2940         case SIOCSHWTSTAMP:
2941                 ret = stmmac_hwtstamp_ioctl(dev, rq);
2942                 break;
2943         default:
2944                 break;
2945         }
2946
2947         return ret;
2948 }
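
/* Minimal user-space sketch of the SIOCSHWTSTAMP path handled above,
 * assuming the standard <linux/net_tstamp.h> ABI and a hypothetical
 * interface named "eth0":
 *
 *     struct hwtstamp_config cfg = {
 *             .tx_type   = HWTSTAMP_TX_ON,
 *             .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *     };
 *     struct ifreq ifr;
 *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *     memset(&ifr, 0, sizeof(ifr));
 *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *     ifr.ifr_data = (void *)&cfg;
 *     if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *             perror("SIOCSHWTSTAMP");
 */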
2949
2950 #ifdef CONFIG_DEBUG_FS
2951 static struct dentry *stmmac_fs_dir;
2952
2953 static void sysfs_display_ring(void *head, int size, int extend_desc,
2954                                struct seq_file *seq)
2955 {
2956         int i;
2957         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2958         struct dma_desc *p = (struct dma_desc *)head;
2959
2960         for (i = 0; i < size; i++) {
2961                 if (extend_desc) {
2962                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2963                                    i, (unsigned int)virt_to_phys(ep),
2964                                    le32_to_cpu(ep->basic.des0),
2965                                    le32_to_cpu(ep->basic.des1),
2966                                    le32_to_cpu(ep->basic.des2),
2967                                    le32_to_cpu(ep->basic.des3));
2968                         ep++;
2969                 } else {
2970                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2971                                    i, (unsigned int)virt_to_phys(p),
2972                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2973                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2974                         p++;
2975                 }
2976                 seq_printf(seq, "\n");
2977         }
2978 }
2979
2980 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2981 {
2982         struct net_device *dev = seq->private;
2983         struct stmmac_priv *priv = netdev_priv(dev);
2984
2985         if (priv->extend_desc) {
2986                 seq_printf(seq, "Extended RX descriptor ring:\n");
2987                 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
2988                 seq_printf(seq, "Extended TX descriptor ring:\n");
2989                 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2990         } else {
2991                 seq_printf(seq, "RX descriptor ring:\n");
2992                 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
2993                 seq_printf(seq, "TX descriptor ring:\n");
2994                 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2995         }
2996
2997         return 0;
2998 }
2999
3000 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3001 {
3002         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3003 }
3004
3005 /* Debugfs files; they should appear in /sys/kernel/debug/stmmaceth/eth0 */
3006
3007 static const struct file_operations stmmac_rings_status_fops = {
3008         .owner = THIS_MODULE,
3009         .open = stmmac_sysfs_ring_open,
3010         .read = seq_read,
3011         .llseek = seq_lseek,
3012         .release = single_release,
3013 };
3014
3015 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3016 {
3017         struct net_device *dev = seq->private;
3018         struct stmmac_priv *priv = netdev_priv(dev);
3019
3020         if (!priv->hw_cap_support) {
3021                 seq_printf(seq, "DMA HW features not supported\n");
3022                 return 0;
3023         }
3024
3025         seq_printf(seq, "==============================\n");
3026         seq_printf(seq, "\tDMA HW features\n");
3027         seq_printf(seq, "==============================\n");
3028
3029         seq_printf(seq, "\t10/100 Mbps: %s\n",
3030                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3031         seq_printf(seq, "\t1000 Mbps: %s\n",
3032                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3033         seq_printf(seq, "\tHalf duplex: %s\n",
3034                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3035         seq_printf(seq, "\tHash Filter: %s\n",
3036                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3037         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3038                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3039         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3040                    (priv->dma_cap.pcs) ? "Y" : "N");
3041         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3042                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3043         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3044                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3045         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3046                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3047         seq_printf(seq, "\tRMON module: %s\n",
3048                    (priv->dma_cap.rmon) ? "Y" : "N");
3049         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3050                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3051         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3052                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3053         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3054                    (priv->dma_cap.eee) ? "Y" : "N");
3055         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3056         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3057                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3058         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3059                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3060                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3061         } else {
3062                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3063                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3064                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3065                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3066         }
3067         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3068                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3069         seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3070                    priv->dma_cap.number_rx_channel);
3071         seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3072                    priv->dma_cap.number_tx_channel);
3073         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3074                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3075
3076         return 0;
3077 }
3078
3079 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3080 {
3081         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3082 }
3083
3084 static const struct file_operations stmmac_dma_cap_fops = {
3085         .owner = THIS_MODULE,
3086         .open = stmmac_sysfs_dma_cap_open,
3087         .read = seq_read,
3088         .llseek = seq_lseek,
3089         .release = single_release,
3090 };
3091
3092 static int stmmac_init_fs(struct net_device *dev)
3093 {
3094         struct stmmac_priv *priv = netdev_priv(dev);
3095
3096         /* Create per netdev entries */
3097         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3098
3099         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3100                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3101
3102                 return -ENOMEM;
3103         }
3104
3105         /* Entry to report DMA RX/TX rings */
3106         priv->dbgfs_rings_status =
3107                 debugfs_create_file("descriptors_status", S_IRUGO,
3108                                     priv->dbgfs_dir, dev,
3109                                     &stmmac_rings_status_fops);
3110
3111         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3112                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3113                 debugfs_remove_recursive(priv->dbgfs_dir);
3114
3115                 return -ENOMEM;
3116         }
3117
3118         /* Entry to report the DMA HW features */
3119         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3120                                             priv->dbgfs_dir,
3121                                             dev, &stmmac_dma_cap_fops);
3122
3123         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3124                 netdev_err(priv->dev, "ERROR creating stmmac DMA capability debugfs file\n");
3125                 debugfs_remove_recursive(priv->dbgfs_dir);
3126
3127                 return -ENOMEM;
3128         }
3129
3130         return 0;
3131 }
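
/* Example: with debugfs mounted at /sys/kernel/debug, the entries created
 * above can be read from user space (the netdev name is an example):
 *
 *     cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *     cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */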
3132
3133 static void stmmac_exit_fs(struct net_device *dev)
3134 {
3135         struct stmmac_priv *priv = netdev_priv(dev);
3136
3137         debugfs_remove_recursive(priv->dbgfs_dir);
3138 }
3139 #endif /* CONFIG_DEBUG_FS */
3140
3141 static const struct net_device_ops stmmac_netdev_ops = {
3142         .ndo_open = stmmac_open,
3143         .ndo_start_xmit = stmmac_xmit,
3144         .ndo_stop = stmmac_release,
3145         .ndo_change_mtu = stmmac_change_mtu,
3146         .ndo_fix_features = stmmac_fix_features,
3147         .ndo_set_features = stmmac_set_features,
3148         .ndo_set_rx_mode = stmmac_set_rx_mode,
3149         .ndo_tx_timeout = stmmac_tx_timeout,
3150         .ndo_do_ioctl = stmmac_ioctl,
3151 #ifdef CONFIG_NET_POLL_CONTROLLER
3152         .ndo_poll_controller = stmmac_poll_controller,
3153 #endif
3154         .ndo_set_mac_address = eth_mac_addr,
3155 };
3156
3157 /**
3158  *  stmmac_hw_init - Init the MAC device
3159  *  @priv: driver private structure
3160  *  Description: this function configures the MAC device according to
3161  *  platform parameters and the HW capability register. It prepares the
3162  *  driver to use either ring or chain mode and to set up either enhanced or
3163  *  normal descriptors.
3164  */
3165 static int stmmac_hw_init(struct stmmac_priv *priv)
3166 {
3167         struct mac_device_info *mac;
3168
3169         /* Identify the MAC HW device */
3170         if (priv->plat->has_gmac) {
3171                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3172                 mac = dwmac1000_setup(priv->ioaddr,
3173                                       priv->plat->multicast_filter_bins,
3174                                       priv->plat->unicast_filter_entries,
3175                                       &priv->synopsys_id);
3176         } else if (priv->plat->has_gmac4) {
3177                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3178                 mac = dwmac4_setup(priv->ioaddr,
3179                                    priv->plat->multicast_filter_bins,
3180                                    priv->plat->unicast_filter_entries,
3181                                    &priv->synopsys_id);
3182         } else {
3183                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3184         }
3185         if (!mac)
3186                 return -ENOMEM;
3187
3188         priv->hw = mac;
3189
3190         /* To use the chained or ring mode */
3191         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3192                 priv->hw->mode = &dwmac4_ring_mode_ops;
3193         } else {
3194                 if (chain_mode) {
3195                         priv->hw->mode = &chain_mode_ops;
3196                         dev_info(priv->device, "Chain mode enabled\n");
3197                         priv->mode = STMMAC_CHAIN_MODE;
3198                 } else {
3199                         priv->hw->mode = &ring_mode_ops;
3200                         dev_info(priv->device, "Ring mode enabled\n");
3201                         priv->mode = STMMAC_RING_MODE;
3202                 }
3203         }
3204
3205         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
3206         priv->hw_cap_support = stmmac_get_hw_features(priv);
3207         if (priv->hw_cap_support) {
3208                 dev_info(priv->device, "DMA HW capability register supported\n");
3209
3210                 /* Override some gmac/dma configuration fields (e.g.
3211                  * enh_desc, tx_coe) that are passed through the
3212                  * platform with the values from the HW capability
3213                  * register (if supported).
3214                  */
3215                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3216                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3217                 priv->hw->pmt = priv->plat->pmt;
3218
3219                 /* TXCOE doesn't work in thresh DMA mode */
3220                 if (priv->plat->force_thresh_dma_mode)
3221                         priv->plat->tx_coe = 0;
3222                 else
3223                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3224
3225                 /* In case of GMAC4 rx_coe is from HW cap register. */
3226                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3227
3228                 if (priv->dma_cap.rx_coe_type2)
3229                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3230                 else if (priv->dma_cap.rx_coe_type1)
3231                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3232
3233         } else {
3234                 dev_info(priv->device, "No HW DMA feature register supported\n");
3235         }
3236
3237         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3238         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3239                 priv->hw->desc = &dwmac4_desc_ops;
3240         else
3241                 stmmac_selec_desc_mode(priv);
3242
3243         if (priv->plat->rx_coe) {
3244                 priv->hw->rx_csum = priv->plat->rx_coe;
3245                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3246                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3247                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3248         }
3249         if (priv->plat->tx_coe)
3250                 dev_info(priv->device, "TX Checksum insertion supported\n");
3251
3252         if (priv->plat->pmt) {
3253                 dev_info(priv->device, "Wake-Up On Lan supported\n");
3254                 device_set_wakeup_capable(priv->device, 1);
3255         }
3256
3257         if (priv->dma_cap.tsoen)
3258                 dev_info(priv->device, "TSO supported\n");
3259
3260         return 0;
3261 }
3262
3263 /**
3264  * stmmac_dvr_probe
3265  * @device: device pointer
3266  * @plat_dat: platform data pointer
3267  * @res: stmmac resource pointer
3268  * Description: this is the main probe function, used to allocate the
3269  * net device via alloc_etherdev and to initialize the private structure.
3270  * Return:
3271  * returns 0 on success, otherwise errno.
3272  */
3273 int stmmac_dvr_probe(struct device *device,
3274                      struct plat_stmmacenet_data *plat_dat,
3275                      struct stmmac_resources *res)
3276 {
3277         int ret = 0;
3278         struct net_device *ndev = NULL;
3279         struct stmmac_priv *priv;
3280
3281         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3282         if (!ndev)
3283                 return -ENOMEM;
3284
3285         SET_NETDEV_DEV(ndev, device);
3286
3287         priv = netdev_priv(ndev);
3288         priv->device = device;
3289         priv->dev = ndev;
3290
3291         stmmac_set_ethtool_ops(ndev);
3292         priv->pause = pause;
3293         priv->plat = plat_dat;
3294         priv->ioaddr = res->addr;
3295         priv->dev->base_addr = (unsigned long)res->addr;
3296
3297         priv->dev->irq = res->irq;
3298         priv->wol_irq = res->wol_irq;
3299         priv->lpi_irq = res->lpi_irq;
3300
3301         if (res->mac)
3302                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3303
3304         dev_set_drvdata(device, priv->dev);
3305
3306         /* Verify driver arguments */
3307         stmmac_verify_args();
3308
3309         /* Override with kernel parameters if supplied XXX CRS XXX
3310          * this needs to have multiple instances
3311          */
3312         if ((phyaddr >= 0) && (phyaddr <= 31))
3313                 priv->plat->phy_addr = phyaddr;
3314
3315         if (priv->plat->stmmac_rst)
3316                 reset_control_deassert(priv->plat->stmmac_rst);
3317
3318         /* Init MAC and get the capabilities */
3319         ret = stmmac_hw_init(priv);
3320         if (ret)
3321                 goto error_hw_init;
3322
3323         ndev->netdev_ops = &stmmac_netdev_ops;
3324
3325         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3326                             NETIF_F_RXCSUM;
3327
3328         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3329                 ndev->hw_features |= NETIF_F_TSO;
3330                 priv->tso = true;
3331                 dev_info(priv->device, "TSO feature enabled\n");
3332         }
3333         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3334         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3335 #ifdef STMMAC_VLAN_TAG_USED
3336         /* Both mac100 and gmac support receive VLAN tag detection */
3337         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3338 #endif
3339         priv->msg_enable = netif_msg_init(debug, default_msg_level);
3340
3341         /* MTU range: 46 - hw-specific max */
3342         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3343         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3344                 ndev->max_mtu = JUMBO_LEN;
3345         else
3346                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3347         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
3348          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
3349          */
3350         if ((priv->plat->maxmtu < ndev->max_mtu) &&
3351             (priv->plat->maxmtu >= ndev->min_mtu))
3352                 ndev->max_mtu = priv->plat->maxmtu;
3353         else if (priv->plat->maxmtu < ndev->min_mtu)
3354                 dev_warn(priv->device,
3355                          "%s: warning: maxmtu has an invalid value (%d)\n",
3356                          __func__, priv->plat->maxmtu);
3357
3358         if (flow_ctrl)
3359                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
3360
3361         /* Rx Watchdog is available in COREs newer than 3.40.
3362          * In some cases, e.g. on buggy HW, this feature has to be
3363          * disabled; this can be done by passing the riwt_off field
3364          * from the platform.
3365          */
3366         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3367                 priv->use_riwt = 1;
3368                 dev_info(priv->device,
3369                          "Enable RX Mitigation via HW Watchdog Timer\n");
3370         }
3371
3372         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3373
3374         spin_lock_init(&priv->lock);
3375
3376         /* If a specific clk_csr value is passed from the platform,
3377          * this means that the CSR Clock Range selection cannot be
3378          * changed at run-time and is fixed. Otherwise, the driver
3379          * tries to set the MDC clock dynamically according to the
3380          * actual clock input.
3381          */
3382         if (!priv->plat->clk_csr)
3383                 stmmac_clk_csr_set(priv);
3384         else
3385                 priv->clk_csr = priv->plat->clk_csr;
3386
3387         stmmac_check_pcs_mode(priv);
3388
3389         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
3390             priv->hw->pcs != STMMAC_PCS_TBI &&
3391             priv->hw->pcs != STMMAC_PCS_RTBI) {
3392                 /* MDIO bus Registration */
3393                 ret = stmmac_mdio_register(ndev);
3394                 if (ret < 0) {
3395                         dev_err(priv->device,
3396                                 "%s: MDIO bus (id: %d) registration failed\n",
3397                                 __func__, priv->plat->bus_id);
3398                         goto error_mdio_register;
3399                 }
3400         }
3401
3402         ret = register_netdev(ndev);
3403         if (ret) {
3404                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3405                         __func__, ret);
3406                 goto error_netdev_register;
3407         }
3408
3409         return ret;
3410
3411 error_netdev_register:
3412         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3413             priv->hw->pcs != STMMAC_PCS_TBI &&
3414             priv->hw->pcs != STMMAC_PCS_RTBI)
3415                 stmmac_mdio_unregister(ndev);
3416 error_mdio_register:
3417         netif_napi_del(&priv->napi);
3418 error_hw_init:
3419         free_netdev(ndev);
3420
3421         return ret;
3422 }
3423 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
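
/* Sketch of how a bus glue driver is expected to use the helper exported
 * above (abridged; pdev, plat_dat and r are placeholders, the real code
 * lives in stmmac_platform.c and stmmac_pci.c):
 *
 *     struct stmmac_resources res;
 *
 *     memset(&res, 0, sizeof(res));
 *     res.addr = devm_ioremap_resource(&pdev->dev, r);
 *     res.irq = platform_get_irq(pdev, 0);
 *     return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 */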
3424
3425 /**
3426  * stmmac_dvr_remove
3427  * @dev: device pointer
3428  * Description: this function resets the TX/RX processes, disables the MAC
3429  * RX/TX, changes the link status and releases the DMA descriptor rings.
3430  */
3431 int stmmac_dvr_remove(struct device *dev)
3432 {
3433         struct net_device *ndev = dev_get_drvdata(dev);
3434         struct stmmac_priv *priv = netdev_priv(ndev);
3435
3436         netdev_info(priv->dev, "%s: removing driver\n", __func__);
3437
3438         priv->hw->dma->stop_rx(priv->ioaddr);
3439         priv->hw->dma->stop_tx(priv->ioaddr);
3440
3441         stmmac_set_mac(priv->ioaddr, false);
3442         netif_carrier_off(ndev);
3443         unregister_netdev(ndev);
3444         if (priv->plat->stmmac_rst)
3445                 reset_control_assert(priv->plat->stmmac_rst);
3446         clk_disable_unprepare(priv->plat->pclk);
3447         clk_disable_unprepare(priv->plat->stmmac_clk);
3448         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3449             priv->hw->pcs != STMMAC_PCS_TBI &&
3450             priv->hw->pcs != STMMAC_PCS_RTBI)
3451                 stmmac_mdio_unregister(ndev);
3452         free_netdev(ndev);
3453
3454         return 0;
3455 }
3456 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3457
3458 /**
3459  * stmmac_suspend - suspend callback
3460  * @dev: device pointer
3461  * Description: this function suspends the device. It is called by the
3462  * platform driver to stop the network queue, release the resources,
3463  * program the PMT register (for WoL) and clean up and release driver resources.
3464  */
3465 int stmmac_suspend(struct device *dev)
3466 {
3467         struct net_device *ndev = dev_get_drvdata(dev);
3468         struct stmmac_priv *priv = netdev_priv(ndev);
3469         unsigned long flags;
3470
3471         if (!ndev || !netif_running(ndev))
3472                 return 0;
3473
3474         if (ndev->phydev)
3475                 phy_stop(ndev->phydev);
3476
3477         spin_lock_irqsave(&priv->lock, flags);
3478
3479         netif_device_detach(ndev);
3480         netif_stop_queue(ndev);
3481
3482         napi_disable(&priv->napi);
3483
3484         /* Stop TX/RX DMA */
3485         priv->hw->dma->stop_tx(priv->ioaddr);
3486         priv->hw->dma->stop_rx(priv->ioaddr);
3487
3488         /* Enable Power down mode by programming the PMT regs */
3489         if (device_may_wakeup(priv->device)) {
3490                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3491                 priv->irq_wake = 1;
3492         } else {
3493                 stmmac_set_mac(priv->ioaddr, false);
3494                 pinctrl_pm_select_sleep_state(priv->device);
3495                 /* Disable clocks; they are re-enabled in stmmac_resume() */
3496                 clk_disable(priv->plat->pclk);
3497                 clk_disable(priv->plat->stmmac_clk);
3498         }
3499         spin_unlock_irqrestore(&priv->lock, flags);
3500
3501         priv->oldlink = 0;
3502         priv->speed = SPEED_UNKNOWN;
3503         priv->oldduplex = DUPLEX_UNKNOWN;
3504         return 0;
3505 }
3506 EXPORT_SYMBOL_GPL(stmmac_suspend);
3507
3508 /**
3509  * stmmac_resume - resume callback
3510  * @dev: device pointer
3511  * Description: on resume this function is invoked to set up the DMA and CORE
3512  * in a usable state.
3513  */
3514 int stmmac_resume(struct device *dev)
3515 {
3516         struct net_device *ndev = dev_get_drvdata(dev);
3517         struct stmmac_priv *priv = netdev_priv(ndev);
3518         unsigned long flags;
3519
3520         if (!netif_running(ndev))
3521                 return 0;
3522
3523         /* The Power-Down bit in the PMT register is cleared
3524          * automatically as soon as a magic packet or a Wake-up frame
3525          * is received. Anyway, it's better to manually clear
3526          * this bit because it can generate problems while resuming
3527          * from other devices (e.g. serial console).
3528          */
3529         if (device_may_wakeup(priv->device)) {
3530                 spin_lock_irqsave(&priv->lock, flags);
3531                 priv->hw->mac->pmt(priv->hw, 0);
3532                 spin_unlock_irqrestore(&priv->lock, flags);
3533                 priv->irq_wake = 0;
3534         } else {
3535                 pinctrl_pm_select_default_state(priv->device);
3536                 /* enable the clk previously disabled */
3537                 clk_enable(priv->plat->stmmac_clk);
3538                 clk_enable(priv->plat->pclk);
3539                 /* reset the phy so that it's ready */
3540                 if (priv->mii)
3541                         stmmac_mdio_reset(priv->mii);
3542         }
3543
3544         netif_device_attach(ndev);
3545
3546         spin_lock_irqsave(&priv->lock, flags);
3547
3548         priv->cur_rx = 0;
3549         priv->dirty_rx = 0;
3550         priv->dirty_tx = 0;
3551         priv->cur_tx = 0;
3552         /* reset private mss value to force mss context settings at
3553          * next tso xmit (only used for gmac4).
3554          */
3555         priv->mss = 0;
3556
3557         stmmac_clear_descriptors(priv);
3558
3559         stmmac_hw_setup(ndev, false);
3560         stmmac_init_tx_coalesce(priv);
3561         stmmac_set_rx_mode(ndev);
3562
3563         napi_enable(&priv->napi);
3564
3565         netif_start_queue(ndev);
3566
3567         spin_unlock_irqrestore(&priv->lock, flags);
3568
3569         if (ndev->phydev)
3570                 phy_start(ndev->phydev);
3571
3572         return 0;
3573 }
3574 EXPORT_SYMBOL_GPL(stmmac_resume);
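
/* Sketch: the exported suspend/resume helpers are meant to be wired into a
 * dev_pm_ops instance by the bus glue, e.g. (the name is a placeholder):
 *
 *     static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 */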
3575
3576 #ifndef MODULE
3577 static int __init stmmac_cmdline_opt(char *str)
3578 {
3579         char *opt;
3580
3581         if (!str || !*str)
3582                 return -EINVAL;
3583         while ((opt = strsep(&str, ",")) != NULL) {
3584                 if (!strncmp(opt, "debug:", 6)) {
3585                         if (kstrtoint(opt + 6, 0, &debug))
3586                                 goto err;
3587                 } else if (!strncmp(opt, "phyaddr:", 8)) {
3588                         if (kstrtoint(opt + 8, 0, &phyaddr))
3589                                 goto err;
3590                 } else if (!strncmp(opt, "buf_sz:", 7)) {
3591                         if (kstrtoint(opt + 7, 0, &buf_sz))
3592                                 goto err;
3593                 } else if (!strncmp(opt, "tc:", 3)) {
3594                         if (kstrtoint(opt + 3, 0, &tc))
3595                                 goto err;
3596                 } else if (!strncmp(opt, "watchdog:", 9)) {
3597                         if (kstrtoint(opt + 9, 0, &watchdog))
3598                                 goto err;
3599                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3600                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3601                                 goto err;
3602                 } else if (!strncmp(opt, "pause:", 6)) {
3603                         if (kstrtoint(opt + 6, 0, &pause))
3604                                 goto err;
3605                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3606                         if (kstrtoint(opt + 10, 0, &eee_timer))
3607                                 goto err;
3608                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3609                         if (kstrtoint(opt + 11, 0, &chain_mode))
3610                                 goto err;
3611                 }
3612         }
3613         return 0;
3614
3615 err:
3616         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3617         return -EINVAL;
3618 }
3619
3620 __setup("stmmaceth=", stmmac_cmdline_opt);
3621 #endif /* MODULE */
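
/* Example: with the driver built in, the options parsed above can be passed
 * on the kernel command line, e.g.:
 *
 *     stmmaceth=debug:16,phyaddr:1,watchdog:4000
 *
 * Each key corresponds to one of the module parameters handled by
 * stmmac_cmdline_opt().
 */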
3622
3623 static int __init stmmac_init(void)
3624 {
3625 #ifdef CONFIG_DEBUG_FS
3626         /* Create debugfs main directory if it doesn't exist yet */
3627         if (!stmmac_fs_dir) {
3628                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3629
3630                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3631                         pr_err("ERROR %s, debugfs create directory failed\n",
3632                                STMMAC_RESOURCE_NAME);
3633
3634                         return -ENOMEM;
3635                 }
3636         }
3637 #endif
3638
3639         return 0;
3640 }
3641
3642 static void __exit stmmac_exit(void)
3643 {
3644 #ifdef CONFIG_DEBUG_FS
3645         debugfs_remove_recursive(stmmac_fs_dir);
3646 #endif
3647 }
3648
3649 module_init(stmmac_init)
3650 module_exit(stmmac_exit)
3651
3652 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3653 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3654 MODULE_LICENSE("GPL");