net: stmmac: adding multiple buffers for TX
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allows the user to force the use of chain mode instead of ring mode
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
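/*
 * Usage note (illustrative): when stmmac is built as a module, these knobs
 * can be set at load time, e.g. "modprobe stmmac chain_mode=1 buf_sz=4096";
 * when built in, the same parameters can be passed on the kernel command
 * line as "stmmac.chain_mode=1". Writable parameters are also exposed under
 * /sys/module/stmmac/parameters/.
 */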
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_clk_csr_set - dynamically set the MDC clock
143  * @priv: driver private structure
144  * Description: this is to dynamically set the MDC clock according to the csr
145  * clock input.
146  * Note:
147  *      If a specific clk_csr value is passed from the platform
148  *      this means that the CSR Clock Range selection cannot be
149  *      changed at run-time and it is fixed (as reported in the driver
150  *      documentation). Otherwise, the driver will try to set the MDC
151  *      clock dynamically according to the actual clock input.
152  */
153 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
154 {
155         u32 clk_rate;
156
157         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
158
159         /* The platform-provided default clk_csr is assumed to be valid
160          * for all other cases except for the ones mentioned below.
161          * For values higher than the IEEE 802.3 specified frequency
162          * we cannot estimate the proper divider, as the frequency of
163          * clk_csr_i is not known. So we do not change the default
164          * divider.
165          */
166         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
167                 if (clk_rate < CSR_F_35M)
168                         priv->clk_csr = STMMAC_CSR_20_35M;
169                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
170                         priv->clk_csr = STMMAC_CSR_35_60M;
171                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
172                         priv->clk_csr = STMMAC_CSR_60_100M;
173                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
174                         priv->clk_csr = STMMAC_CSR_100_150M;
175                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
176                         priv->clk_csr = STMMAC_CSR_150_250M;
177                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
178                         priv->clk_csr = STMMAC_CSR_250_300M;
179         }
180 }
181
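/*
 * Example (illustrative): on a platform whose CSR clock runs at 66 MHz and
 * that does not pass a fixed clk_csr value, the range check above selects
 * STMMAC_CSR_60_100M; the MDIO code later uses this value to program the
 * MDC clock divider.
 */
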
182 static void print_pkt(unsigned char *buf, int len)
183 {
184         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
185         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
186 }
187
188 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
189 {
190         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
191         u32 avail;
192
193         if (tx_q->dirty_tx > tx_q->cur_tx)
194                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
195         else
196                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
197
198         return avail;
199 }
200
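/*
 * Worked example for the wrap-around arithmetic above (illustrative,
 * assuming DMA_TX_SIZE == 256): with dirty_tx == 250 and cur_tx == 10 the
 * first branch yields 250 - 10 - 1 = 239 free descriptors; with
 * dirty_tx == 10 and cur_tx == 250 the ring has wrapped and the second
 * branch yields 256 - 250 + 10 - 1 = 15. The "- 1" keeps one descriptor
 * unused so that a completely full ring can be told apart from an empty one.
 */
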
201 /**
202  * stmmac_rx_dirty - Get RX queue dirty
203  * @priv: driver private structure
204  * @queue: RX queue index
205  */
206 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
207 {
208         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
209         u32 dirty;
210
211         if (rx_q->dirty_rx <= rx_q->cur_rx)
212                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
213         else
214                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
215
216         return dirty;
217 }
218
219 /**
220  * stmmac_hw_fix_mac_speed - callback for speed selection
221  * @priv: driver private structure
222  * Description: on some platforms (e.g. ST), some HW system configuration
223  * registers have to be set according to the link speed negotiated.
224  */
225 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
226 {
227         struct net_device *ndev = priv->dev;
228         struct phy_device *phydev = ndev->phydev;
229
230         if (likely(priv->plat->fix_mac_speed))
231                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
232 }
233
234 /**
235  * stmmac_enable_eee_mode - check and enter in LPI mode
236  * @priv: driver private structure
237  * Description: this function checks whether all TX queues have finished their
238  * work and, if so, enters LPI mode when EEE is enabled.
239  */
240 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
241 {
242         u32 tx_cnt = priv->plat->tx_queues_to_use;
243         u32 queue;
244
245         /* check whether all TX queues have finished their work */
246         for (queue = 0; queue < tx_cnt; queue++) {
247                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
248
249                 if (tx_q->dirty_tx != tx_q->cur_tx)
250                         return; /* still unfinished work */
251         }
252
253         /* Check and enter in LPI mode */
254         if (!priv->tx_path_in_lpi_mode)
255                 priv->hw->mac->set_eee_mode(priv->hw,
256                                             priv->plat->en_tx_lpi_clockgating);
257 }
258
259 /**
260  * stmmac_disable_eee_mode - disable and exit from LPI mode
261  * @priv: driver private structure
262  * Description: this function disables EEE and exits the LPI state.
263  * It is called from the xmit path.
264  */
265 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
266 {
267         priv->hw->mac->reset_eee_mode(priv->hw);
268         del_timer_sync(&priv->eee_ctrl_timer);
269         priv->tx_path_in_lpi_mode = false;
270 }
271
272 /**
273  * stmmac_eee_ctrl_timer - EEE TX SW timer.
274  * @arg : data hook
275  * Description:
276  *  if there is no data transfer and we are not already in LPI state,
277  *  then the MAC transmitter can be moved to LPI state.
278  */
279 static void stmmac_eee_ctrl_timer(unsigned long arg)
280 {
281         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
282
283         stmmac_enable_eee_mode(priv);
284         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
285 }
286
287 /**
288  * stmmac_eee_init - init EEE
289  * @priv: driver private structure
290  * Description:
291  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
292  *  can also manage EEE, this function enables the LPI state and starts the
293  *  related timer.
294  */
295 bool stmmac_eee_init(struct stmmac_priv *priv)
296 {
297         struct net_device *ndev = priv->dev;
298         unsigned long flags;
299         bool ret = false;
300
301         /* When using PCS we cannot deal with the PHY registers at this stage,
302          * so we do not support extra features like EEE.
303          */
304         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
305             (priv->hw->pcs == STMMAC_PCS_TBI) ||
306             (priv->hw->pcs == STMMAC_PCS_RTBI))
307                 goto out;
308
309         /* MAC core supports the EEE feature. */
310         if (priv->dma_cap.eee) {
311                 int tx_lpi_timer = priv->tx_lpi_timer;
312
313                 /* Check if the PHY supports EEE */
314                 if (phy_init_eee(ndev->phydev, 1)) {
315                         /* Handle the case where EEE can no longer be supported
316                          * at run-time (for example because the link partner caps
317                          * have changed).
318                          * In that case the driver disables its own timers.
319                          */
320                         spin_lock_irqsave(&priv->lock, flags);
321                         if (priv->eee_active) {
322                                 netdev_dbg(priv->dev, "disable EEE\n");
323                                 del_timer_sync(&priv->eee_ctrl_timer);
324                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
325                                                              tx_lpi_timer);
326                         }
327                         priv->eee_active = 0;
328                         spin_unlock_irqrestore(&priv->lock, flags);
329                         goto out;
330                 }
331                 /* Activate the EEE and start timers */
332                 spin_lock_irqsave(&priv->lock, flags);
333                 if (!priv->eee_active) {
334                         priv->eee_active = 1;
335                         setup_timer(&priv->eee_ctrl_timer,
336                                     stmmac_eee_ctrl_timer,
337                                     (unsigned long)priv);
338                         mod_timer(&priv->eee_ctrl_timer,
339                                   STMMAC_LPI_T(eee_timer));
340
341                         priv->hw->mac->set_eee_timer(priv->hw,
342                                                      STMMAC_DEFAULT_LIT_LS,
343                                                      tx_lpi_timer);
344                 }
345                 /* Set HW EEE according to the speed */
346                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
347
348                 ret = true;
349                 spin_unlock_irqrestore(&priv->lock, flags);
350
351                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
352         }
353 out:
354         return ret;
355 }
356
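/*
 * Note (illustrative summary): once armed, eee_ctrl_timer periodically calls
 * stmmac_enable_eee_mode(), which only asserts LPI when every TX queue has
 * cur_tx == dirty_tx; stmmac_disable_eee_mode() is called from the xmit path
 * to leave LPI as soon as new traffic is queued.
 */
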
357 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
358  * @priv: driver private structure
359  * @p : descriptor pointer
360  * @skb : the socket buffer
361  * Description:
362  * This function reads the timestamp from the descriptor, performs some
363  * sanity checks and then passes it to the stack.
364  */
365 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
366                                    struct dma_desc *p, struct sk_buff *skb)
367 {
368         struct skb_shared_hwtstamps shhwtstamp;
369         u64 ns;
370
371         if (!priv->hwts_tx_en)
372                 return;
373
374         /* exit if skb doesn't support hw tstamp */
375         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
376                 return;
377
378         /* check tx tstamp status */
379         if (!priv->hw->desc->get_tx_timestamp_status(p)) {
380                 /* get the valid tstamp */
381                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
382
383                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
384                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
385
386                 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
387                 /* pass tstamp to stack */
388                 skb_tstamp_tx(skb, &shhwtstamp);
389         }
390
391         return;
392 }
393
394 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
395  * @priv: driver private structure
396  * @p : descriptor pointer
397  * @np : next descriptor pointer
398  * @skb : the socket buffer
399  * Description :
400  * This function will read received packet's timestamp from the descriptor
401  * and pass it to the stack. It also performs some sanity checks.
402  */
403 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
404                                    struct dma_desc *np, struct sk_buff *skb)
405 {
406         struct skb_shared_hwtstamps *shhwtstamp = NULL;
407         u64 ns;
408
409         if (!priv->hwts_rx_en)
410                 return;
411
412         /* Check if timestamp is available */
413         if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
414                 /* For GMAC4, the valid timestamp is from CTX next desc. */
415                 if (priv->plat->has_gmac4)
416                         ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
417                 else
418                         ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
419
420                 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
421                 shhwtstamp = skb_hwtstamps(skb);
422                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
423                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
424         } else  {
425                 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
426         }
427 }
428
429 /**
430  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
431  *  @dev: device pointer.
432  *  @ifr: An IOCTL-specific structure that can contain a pointer to
433  *  a proprietary structure used to pass information to the driver.
434  *  Description:
435  *  This function configures the MAC to enable/disable both outgoing (TX)
436  *  and incoming (RX) packet time stamping based on user input.
437  *  Return Value:
438  *  0 on success and an appropriate negative integer on failure.
439  */
440 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
441 {
442         struct stmmac_priv *priv = netdev_priv(dev);
443         struct hwtstamp_config config;
444         struct timespec64 now;
445         u64 temp = 0;
446         u32 ptp_v2 = 0;
447         u32 tstamp_all = 0;
448         u32 ptp_over_ipv4_udp = 0;
449         u32 ptp_over_ipv6_udp = 0;
450         u32 ptp_over_ethernet = 0;
451         u32 snap_type_sel = 0;
452         u32 ts_master_en = 0;
453         u32 ts_event_en = 0;
454         u32 value = 0;
455         u32 sec_inc;
456
457         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
458                 netdev_alert(priv->dev, "No support for HW time stamping\n");
459                 priv->hwts_tx_en = 0;
460                 priv->hwts_rx_en = 0;
461
462                 return -EOPNOTSUPP;
463         }
464
465         if (copy_from_user(&config, ifr->ifr_data,
466                            sizeof(struct hwtstamp_config)))
467                 return -EFAULT;
468
469         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
470                    __func__, config.flags, config.tx_type, config.rx_filter);
471
472         /* reserved for future extensions */
473         if (config.flags)
474                 return -EINVAL;
475
476         if (config.tx_type != HWTSTAMP_TX_OFF &&
477             config.tx_type != HWTSTAMP_TX_ON)
478                 return -ERANGE;
479
480         if (priv->adv_ts) {
481                 switch (config.rx_filter) {
482                 case HWTSTAMP_FILTER_NONE:
483                         /* do not time stamp any incoming packet */
484                         config.rx_filter = HWTSTAMP_FILTER_NONE;
485                         break;
486
487                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
488                         /* PTP v1, UDP, any kind of event packet */
489                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
490                         /* take time stamp for all event messages */
491                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
492
493                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
494                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
495                         break;
496
497                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
498                         /* PTP v1, UDP, Sync packet */
499                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
500                         /* take time stamp for SYNC messages only */
501                         ts_event_en = PTP_TCR_TSEVNTENA;
502
503                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
504                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
505                         break;
506
507                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
508                         /* PTP v1, UDP, Delay_req packet */
509                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
510                         /* take time stamp for Delay_Req messages only */
511                         ts_master_en = PTP_TCR_TSMSTRENA;
512                         ts_event_en = PTP_TCR_TSEVNTENA;
513
514                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
515                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
516                         break;
517
518                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
519                         /* PTP v2, UDP, any kind of event packet */
520                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
521                         ptp_v2 = PTP_TCR_TSVER2ENA;
522                         /* take time stamp for all event messages */
523                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
524
525                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
526                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
527                         break;
528
529                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
530                         /* PTP v2, UDP, Sync packet */
531                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
532                         ptp_v2 = PTP_TCR_TSVER2ENA;
533                         /* take time stamp for SYNC messages only */
534                         ts_event_en = PTP_TCR_TSEVNTENA;
535
536                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
537                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
538                         break;
539
540                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
541                         /* PTP v2, UDP, Delay_req packet */
542                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
543                         ptp_v2 = PTP_TCR_TSVER2ENA;
544                         /* take time stamp for Delay_Req messages only */
545                         ts_master_en = PTP_TCR_TSMSTRENA;
546                         ts_event_en = PTP_TCR_TSEVNTENA;
547
548                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
549                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
550                         break;
551
552                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
553                         /* PTP v2/802.1AS, any layer, any kind of event packet */
554                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
555                         ptp_v2 = PTP_TCR_TSVER2ENA;
556                         /* take time stamp for all event messages */
557                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
558
559                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
560                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
561                         ptp_over_ethernet = PTP_TCR_TSIPENA;
562                         break;
563
564                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
565                         /* PTP v2/802.1AS, any layer, Sync packet */
566                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
567                         ptp_v2 = PTP_TCR_TSVER2ENA;
568                         /* take time stamp for SYNC messages only */
569                         ts_event_en = PTP_TCR_TSEVNTENA;
570
571                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
572                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
573                         ptp_over_ethernet = PTP_TCR_TSIPENA;
574                         break;
575
576                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
577                         /* PTP v2/802.1AS, any layer, Delay_req packet */
578                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
579                         ptp_v2 = PTP_TCR_TSVER2ENA;
580                         /* take time stamp for Delay_Req messages only */
581                         ts_master_en = PTP_TCR_TSMSTRENA;
582                         ts_event_en = PTP_TCR_TSEVNTENA;
583
584                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586                         ptp_over_ethernet = PTP_TCR_TSIPENA;
587                         break;
588
589                 case HWTSTAMP_FILTER_ALL:
590                         /* time stamp any incoming packet */
591                         config.rx_filter = HWTSTAMP_FILTER_ALL;
592                         tstamp_all = PTP_TCR_TSENALL;
593                         break;
594
595                 default:
596                         return -ERANGE;
597                 }
598         } else {
599                 switch (config.rx_filter) {
600                 case HWTSTAMP_FILTER_NONE:
601                         config.rx_filter = HWTSTAMP_FILTER_NONE;
602                         break;
603                 default:
604                         /* PTP v1, UDP, any kind of event packet */
605                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
606                         break;
607                 }
608         }
609         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
610         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
611
612         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
613                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
614         else {
615                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
616                          tstamp_all | ptp_v2 | ptp_over_ethernet |
617                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
618                          ts_master_en | snap_type_sel);
619                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
620
621                 /* program Sub Second Increment reg */
622                 sec_inc = priv->hw->ptp->config_sub_second_increment(
623                         priv->ptpaddr, priv->plat->clk_ptp_rate,
624                         priv->plat->has_gmac4);
625                 temp = div_u64(1000000000ULL, sec_inc);
626
627                 /* calculate default added value:
628                  * formula is :
629                  * addend = (2^32)/freq_div_ratio;
630                  * where, freq_div_ratio = 1e9ns/sec_inc
631                  */
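                /*
                 * Numeric sketch of the computation below (illustrative,
                 * assuming clk_ptp_rate = 50 MHz and a sub-second increment
                 * of 40 ns): temp = 1e9 / 40 = 25,000,000, so
                 * addend = (25,000,000 << 32) / 50,000,000 = 0x80000000,
                 * i.e. the addend accumulator advances by half of its 32-bit
                 * range on each PTP clock cycle.
                 */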
632                 temp = (u64)(temp << 32);
633                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
634                 priv->hw->ptp->config_addend(priv->ptpaddr,
635                                              priv->default_addend);
636
637                 /* initialize system time */
638                 ktime_get_real_ts64(&now);
639
640                 /* lower 32 bits of tv_sec are safe until y2106 */
641                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
642                                             now.tv_nsec);
643         }
644
645         return copy_to_user(ifr->ifr_data, &config,
646                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
647 }
648
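/*
 * Illustrative userspace sketch (not part of this file): the ioctl above is
 * reached through the standard SIOCSHWTSTAMP request, with ifr_data pointing
 * to a struct hwtstamp_config, e.g.:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The interface name "eth0" and the socket descriptor fd are placeholders.
 */
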
649 /**
650  * stmmac_init_ptp - init PTP
651  * @priv: driver private structure
652  * Description: this is to verify if the HW supports PTPv1 or PTPv2.
653  * This is done by looking at the HW cap. register.
654  * This function also registers the ptp driver.
655  */
656 static int stmmac_init_ptp(struct stmmac_priv *priv)
657 {
658         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
659                 return -EOPNOTSUPP;
660
661         priv->adv_ts = 0;
662         /* Check if adv_ts can be enabled for dwmac 4.x core */
663         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
664                 priv->adv_ts = 1;
665         /* Dwmac 3.x core with extend_desc can support adv_ts */
666         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
667                 priv->adv_ts = 1;
668
669         if (priv->dma_cap.time_stamp)
670                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
671
672         if (priv->adv_ts)
673                 netdev_info(priv->dev,
674                             "IEEE 1588-2008 Advanced Timestamp supported\n");
675
676         priv->hw->ptp = &stmmac_ptp;
677         priv->hwts_tx_en = 0;
678         priv->hwts_rx_en = 0;
679
680         stmmac_ptp_register(priv);
681
682         return 0;
683 }
684
685 static void stmmac_release_ptp(struct stmmac_priv *priv)
686 {
687         if (priv->plat->clk_ptp_ref)
688                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
689         stmmac_ptp_unregister(priv);
690 }
691
692 /**
693  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
694  *  @priv: driver private structure
695  *  Description: It is used for configuring the flow control in all queues
696  */
697 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
698 {
699         u32 tx_cnt = priv->plat->tx_queues_to_use;
700
701         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
702                                  priv->pause, tx_cnt);
703 }
704
705 /**
706  * stmmac_adjust_link - adjusts the link parameters
707  * @dev: net device structure
708  * Description: this is the helper called by the physical abstraction layer
709  * drivers to communicate the phy link status. According the speed and duplex
710  * drivers to communicate the phy link status. According to the speed and
711  * duplex, this driver can invoke registered glue-logic as well.
712  * It also invokes the EEE initialization because the link may switch
713  * to a different network that is EEE capable.
714 static void stmmac_adjust_link(struct net_device *dev)
715 {
716         struct stmmac_priv *priv = netdev_priv(dev);
717         struct phy_device *phydev = dev->phydev;
718         unsigned long flags;
719         int new_state = 0;
720
721         if (!phydev)
722                 return;
723
724         spin_lock_irqsave(&priv->lock, flags);
725
726         if (phydev->link) {
727                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
728
729                 /* Now we make sure that we can be in full duplex mode.
730                  * If not, we operate in half-duplex mode. */
731                 if (phydev->duplex != priv->oldduplex) {
732                         new_state = 1;
733                         if (!(phydev->duplex))
734                                 ctrl &= ~priv->hw->link.duplex;
735                         else
736                                 ctrl |= priv->hw->link.duplex;
737                         priv->oldduplex = phydev->duplex;
738                 }
739                 /* Flow Control operation */
740                 if (phydev->pause)
741                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
742
743                 if (phydev->speed != priv->speed) {
744                         new_state = 1;
745                         switch (phydev->speed) {
746                         case 1000:
747                                 if (priv->plat->has_gmac ||
748                                     priv->plat->has_gmac4)
749                                         ctrl &= ~priv->hw->link.port;
750                                 break;
751                         case 100:
752                                 if (priv->plat->has_gmac ||
753                                     priv->plat->has_gmac4) {
754                                         ctrl |= priv->hw->link.port;
755                                         ctrl |= priv->hw->link.speed;
756                                 } else {
757                                         ctrl &= ~priv->hw->link.port;
758                                 }
759                                 break;
760                         case 10:
761                                 if (priv->plat->has_gmac ||
762                                     priv->plat->has_gmac4) {
763                                         ctrl |= priv->hw->link.port;
764                                         ctrl &= ~(priv->hw->link.speed);
765                                 } else {
766                                         ctrl &= ~priv->hw->link.port;
767                                 }
768                                 break;
769                         default:
770                                 netif_warn(priv, link, priv->dev,
771                                            "broken speed: %d\n", phydev->speed);
772                                 phydev->speed = SPEED_UNKNOWN;
773                                 break;
774                         }
775                         if (phydev->speed != SPEED_UNKNOWN)
776                                 stmmac_hw_fix_mac_speed(priv);
777                         priv->speed = phydev->speed;
778                 }
779
780                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
781
782                 if (!priv->oldlink) {
783                         new_state = 1;
784                         priv->oldlink = 1;
785                 }
786         } else if (priv->oldlink) {
787                 new_state = 1;
788                 priv->oldlink = 0;
789                 priv->speed = SPEED_UNKNOWN;
790                 priv->oldduplex = DUPLEX_UNKNOWN;
791         }
792
793         if (new_state && netif_msg_link(priv))
794                 phy_print_status(phydev);
795
796         spin_unlock_irqrestore(&priv->lock, flags);
797
798         if (phydev->is_pseudo_fixed_link)
799                 /* Stop the PHY layer from calling the adjust_link hook in case
800                  * a switch is attached to the stmmac driver.
801                  */
802                 phydev->irq = PHY_IGNORE_INTERRUPT;
803         else
804                 /* At this stage, init the EEE if supported.
805                  * Never called in case of fixed_link.
806                  */
807                 priv->eee_enabled = stmmac_eee_init(priv);
808 }
809
810 /**
811  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
812  * @priv: driver private structure
813  * Description: this is to verify if the HW supports the Physical Coding
814  * Sublayer (PCS) interface, which can be used when the MAC is
815  * configured for the TBI, RTBI, or SGMII PHY interface.
816  */
817 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
818 {
819         int interface = priv->plat->interface;
820
821         if (priv->dma_cap.pcs) {
822                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
823                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
824                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
825                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
826                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
827                         priv->hw->pcs = STMMAC_PCS_RGMII;
828                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
829                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
830                         priv->hw->pcs = STMMAC_PCS_SGMII;
831                 }
832         }
833 }
834
835 /**
836  * stmmac_init_phy - PHY initialization
837  * @dev: net device structure
838  * Description: it initializes the driver's PHY state, and attaches the PHY
839  * to the mac driver.
840  *  Return value:
841  *  0 on success
842  */
843 static int stmmac_init_phy(struct net_device *dev)
844 {
845         struct stmmac_priv *priv = netdev_priv(dev);
846         struct phy_device *phydev;
847         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
848         char bus_id[MII_BUS_ID_SIZE];
849         int interface = priv->plat->interface;
850         int max_speed = priv->plat->max_speed;
851         priv->oldlink = 0;
852         priv->speed = SPEED_UNKNOWN;
853         priv->oldduplex = DUPLEX_UNKNOWN;
854
855         if (priv->plat->phy_node) {
856                 phydev = of_phy_connect(dev, priv->plat->phy_node,
857                                         &stmmac_adjust_link, 0, interface);
858         } else {
859                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
860                          priv->plat->bus_id);
861
862                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
863                          priv->plat->phy_addr);
864                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
865                            phy_id_fmt);
866
867                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
868                                      interface);
869         }
870
871         if (IS_ERR_OR_NULL(phydev)) {
872                 netdev_err(priv->dev, "Could not attach to PHY\n");
873                 if (!phydev)
874                         return -ENODEV;
875
876                 return PTR_ERR(phydev);
877         }
878
879         /* Stop advertising 1000BASE-T capability if the interface is not GMII */
880         if ((interface == PHY_INTERFACE_MODE_MII) ||
881             (interface == PHY_INTERFACE_MODE_RMII) ||
882                 (max_speed < 1000 && max_speed > 0))
883                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
884                                          SUPPORTED_1000baseT_Full);
885
886         /*
887          * Broken HW is sometimes missing the pull-up resistor on the
888          * MDIO line, which results in reads to non-existent devices returning
889          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
890          * device as well.
891          * Note: phydev->phy_id is the result of reading the UID PHY registers.
892          */
893         if (!priv->plat->phy_node && phydev->phy_id == 0) {
894                 phy_disconnect(phydev);
895                 return -ENODEV;
896         }
897
898         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
899          * subsequent PHY polling; make sure we force a link transition if
900          * we have an UP/DOWN/UP transition
901          */
902         if (phydev->is_pseudo_fixed_link)
903                 phydev->irq = PHY_POLL;
904
905         phy_attached_info(phydev);
906         return 0;
907 }
908
909 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
910 {
911         u32 rx_cnt = priv->plat->rx_queues_to_use;
912         void *head_rx;
913         u32 queue;
914
915         /* Display RX rings */
916         for (queue = 0; queue < rx_cnt; queue++) {
917                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
918
919                 pr_info("\tRX Queue %u rings\n", queue);
920
921                 if (priv->extend_desc)
922                         head_rx = (void *)rx_q->dma_erx;
923                 else
924                         head_rx = (void *)rx_q->dma_rx;
925
926                 /* Display RX ring */
927                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
928         }
929 }
930
931 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
932 {
933         u32 tx_cnt = priv->plat->tx_queues_to_use;
934         void *head_tx;
935         u32 queue;
936
937         /* Display TX rings */
938         for (queue = 0; queue < tx_cnt; queue++) {
939                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
940
941                 pr_info("\tTX Queue %d rings\n", queue);
942
943                 if (priv->extend_desc)
944                         head_tx = (void *)tx_q->dma_etx;
945                 else
946                         head_tx = (void *)tx_q->dma_tx;
947
948                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
949         }
950 }
951
952 static void stmmac_display_rings(struct stmmac_priv *priv)
953 {
954         /* Display RX ring */
955         stmmac_display_rx_rings(priv);
956
957         /* Display TX ring */
958         stmmac_display_tx_rings(priv);
959 }
960
961 static int stmmac_set_bfsize(int mtu, int bufsize)
962 {
963         int ret = bufsize;
964
965         if (mtu >= BUF_SIZE_4KiB)
966                 ret = BUF_SIZE_8KiB;
967         else if (mtu >= BUF_SIZE_2KiB)
968                 ret = BUF_SIZE_4KiB;
969         else if (mtu > DEFAULT_BUFSIZE)
970                 ret = BUF_SIZE_2KiB;
971         else
972                 ret = DEFAULT_BUFSIZE;
973
974         return ret;
975 }
976
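/*
 * Example (illustrative): with the default 1500-byte MTU the function keeps
 * DEFAULT_BUFSIZE (1536 bytes); an MTU of 3000 is >= BUF_SIZE_2KiB but
 * < BUF_SIZE_4KiB, so BUF_SIZE_4KiB is selected.
 */
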
977 /**
978  * stmmac_clear_rx_descriptors - clear RX descriptors
979  * @priv: driver private structure
980  * @queue: RX queue index
981  * Description: this function is called to clear the RX descriptors;
982  * it works whether basic or extended descriptors are used.
983  */
984 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
985 {
986         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
987         int i;
988
989         /* Clear the RX descriptors */
990         for (i = 0; i < DMA_RX_SIZE; i++)
991                 if (priv->extend_desc)
992                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
993                                                      priv->use_riwt, priv->mode,
994                                                      (i == DMA_RX_SIZE - 1));
995                 else
996                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
997                                                      priv->use_riwt, priv->mode,
998                                                      (i == DMA_RX_SIZE - 1));
999 }
1000
1001 /**
1002  * stmmac_clear_tx_descriptors - clear tx descriptors
1003  * @priv: driver private structure
1004  * @queue: TX queue index.
1005  * Description: this function is called to clear the TX descriptors;
1006  * it works whether basic or extended descriptors are used.
1007  */
1008 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1009 {
1010         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1011         int i;
1012
1013         /* Clear the TX descriptors */
1014         for (i = 0; i < DMA_TX_SIZE; i++)
1015                 if (priv->extend_desc)
1016                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1017                                                      priv->mode,
1018                                                      (i == DMA_TX_SIZE - 1));
1019                 else
1020                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1021                                                      priv->mode,
1022                                                      (i == DMA_TX_SIZE - 1));
1023 }
1024
1025 /**
1026  * stmmac_clear_descriptors - clear descriptors
1027  * @priv: driver private structure
1028  * Description: this function is called to clear the TX and RX descriptors;
1029  * it works whether basic or extended descriptors are used.
1030  */
1031 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1032 {
1033         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1034         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1035         u32 queue;
1036
1037         /* Clear the RX descriptors */
1038         for (queue = 0; queue < rx_queue_cnt; queue++)
1039                 stmmac_clear_rx_descriptors(priv, queue);
1040
1041         /* Clear the TX descriptors */
1042         for (queue = 0; queue < tx_queue_cnt; queue++)
1043                 stmmac_clear_tx_descriptors(priv, queue);
1044 }
1045
1046 /**
1047  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1048  * @priv: driver private structure
1049  * @p: descriptor pointer
1050  * @i: descriptor index
1051  * @flags: gfp flag
1052  * @queue: RX queue index
1053  * Description: this function is called to allocate a receive buffer, perform
1054  * the DMA mapping and init the descriptor.
1055  */
1056 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1057                                   int i, gfp_t flags, u32 queue)
1058 {
1059         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1060         struct sk_buff *skb;
1061
1062         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1063         if (!skb) {
1064                 netdev_err(priv->dev,
1065                            "%s: Rx init fails; skb is NULL\n", __func__);
1066                 return -ENOMEM;
1067         }
1068         rx_q->rx_skbuff[i] = skb;
1069         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1070                                                 priv->dma_buf_sz,
1071                                                 DMA_FROM_DEVICE);
1072         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1073                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1074                 dev_kfree_skb_any(skb);
1075                 return -EINVAL;
1076         }
1077
1078         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1079                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1080         else
1081                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1082
1083         if ((priv->hw->mode->init_desc3) &&
1084             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1085                 priv->hw->mode->init_desc3(p);
1086
1087         return 0;
1088 }
1089
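/*
 * Note (illustrative summary): on DWMAC 4.00 and newer cores the buffer DMA
 * address is written to des0, while older cores use des2; with 16 KiB
 * buffers the mode-specific init_desc3() callback is also invoked, which in
 * ring mode is expected to program the second buffer pointer.
 */
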
1090 /**
1091  * stmmac_free_rx_buffer - free an RX dma buffer
1092  * @priv: private structure
1093  * @queue: RX queue index
1094  * @i: buffer index.
1095  */
1096 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1097 {
1098         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1099
1100         if (rx_q->rx_skbuff[i]) {
1101                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1102                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1103                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1104         }
1105         rx_q->rx_skbuff[i] = NULL;
1106 }
1107
1108 /**
1109  * stmmac_free_tx_buffer - free a TX dma buffer
1110  * @priv: private structure
1111  * @queue: TX queue index
1112  * @i: buffer index.
1113  */
1114 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1115 {
1116         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1117
1118         if (tx_q->tx_skbuff_dma[i].buf) {
1119                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1120                         dma_unmap_page(priv->device,
1121                                        tx_q->tx_skbuff_dma[i].buf,
1122                                        tx_q->tx_skbuff_dma[i].len,
1123                                        DMA_TO_DEVICE);
1124                 else
1125                         dma_unmap_single(priv->device,
1126                                          tx_q->tx_skbuff_dma[i].buf,
1127                                          tx_q->tx_skbuff_dma[i].len,
1128                                          DMA_TO_DEVICE);
1129         }
1130
1131         if (tx_q->tx_skbuff[i]) {
1132                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1133                 tx_q->tx_skbuff[i] = NULL;
1134                 tx_q->tx_skbuff_dma[i].buf = 0;
1135                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1136         }
1137 }
1138
1139 /**
1140  * init_dma_rx_desc_rings - init the RX descriptor rings
1141  * @dev: net device structure
1142  * @flags: gfp flag.
1143  * Description: this function initializes the DMA RX descriptors
1144  * and allocates the socket buffers. It supports the chained and ring
1145  * modes.
1146  */
1147 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1148 {
1149         struct stmmac_priv *priv = netdev_priv(dev);
1150         u32 rx_count = priv->plat->rx_queues_to_use;
1151         unsigned int bfsize = 0;
1152         int ret = -ENOMEM;
1153         u32 queue;
1154         int i;
1155
1156         if (priv->hw->mode->set_16kib_bfsize)
1157                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1158
1159         if (bfsize < BUF_SIZE_16KiB)
1160                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1161
1162         priv->dma_buf_sz = bfsize;
1163
1164         /* RX INITIALIZATION */
1165         netif_dbg(priv, probe, priv->dev,
1166                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1167
1168         for (queue = 0; queue < rx_count; queue++) {
1169                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1170
1171                 netif_dbg(priv, probe, priv->dev,
1172                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1173                           (u32)rx_q->dma_rx_phy);
1174
1175                 for (i = 0; i < DMA_RX_SIZE; i++) {
1176                         struct dma_desc *p;
1177
1178                         if (priv->extend_desc)
1179                                 p = &((rx_q->dma_erx + i)->basic);
1180                         else
1181                                 p = rx_q->dma_rx + i;
1182
1183                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1184                                                      queue);
1185                         if (ret)
1186                                 goto err_init_rx_buffers;
1187
1188                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1189                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1190                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1191                 }
1192
1193                 rx_q->cur_rx = 0;
1194                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1195
1196                 stmmac_clear_rx_descriptors(priv, queue);
1197
1198                 /* Setup the chained descriptor addresses */
1199                 if (priv->mode == STMMAC_CHAIN_MODE) {
1200                         if (priv->extend_desc)
1201                                 priv->hw->mode->init(rx_q->dma_erx,
1202                                                      rx_q->dma_rx_phy,
1203                                                      DMA_RX_SIZE, 1);
1204                         else
1205                                 priv->hw->mode->init(rx_q->dma_rx,
1206                                                      rx_q->dma_rx_phy,
1207                                                      DMA_RX_SIZE, 0);
1208                 }
1209         }
1210
1211         buf_sz = bfsize;
1212
1213         return 0;
1214
1215 err_init_rx_buffers:
1216         while (queue >= 0) {
1217                 while (--i >= 0)
1218                         stmmac_free_rx_buffer(priv, queue, i);
1219
1220                 if (queue == 0)
1221                         break;
1222
1223                 i = DMA_RX_SIZE;
1224                 queue--;
1225         }
1226
1227         return ret;
1228 }
1229
1230 /**
1231  * init_dma_tx_desc_rings - init the TX descriptor rings
1232  * @dev: net device structure.
1233  * Description: this function initializes the DMA TX descriptors
1234  * and allocates the socket buffers. It supports the chained and ring
1235  * modes.
1236  */
1237 static int init_dma_tx_desc_rings(struct net_device *dev)
1238 {
1239         struct stmmac_priv *priv = netdev_priv(dev);
1240         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1241         u32 queue;
1242         int i;
1243
1244         for (queue = 0; queue < tx_queue_cnt; queue++) {
1245                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1246
1247                 netif_dbg(priv, probe, priv->dev,
1248                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1249                          (u32)tx_q->dma_tx_phy);
1250
1251                 /* Setup the chained descriptor addresses */
1252                 if (priv->mode == STMMAC_CHAIN_MODE) {
1253                         if (priv->extend_desc)
1254                                 priv->hw->mode->init(tx_q->dma_etx,
1255                                                      tx_q->dma_tx_phy,
1256                                                      DMA_TX_SIZE, 1);
1257                         else
1258                                 priv->hw->mode->init(tx_q->dma_tx,
1259                                                      tx_q->dma_tx_phy,
1260                                                      DMA_TX_SIZE, 0);
1261                 }
1262
1263                 for (i = 0; i < DMA_TX_SIZE; i++) {
1264                         struct dma_desc *p;
1265
1266                         if (priv->extend_desc)
1267                                 p = &((tx_q->dma_etx + i)->basic);
1268                         else
1269                                 p = tx_q->dma_tx + i;
1270
1271                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1272                                 p->des0 = 0;
1273                                 p->des1 = 0;
1274                                 p->des2 = 0;
1275                                 p->des3 = 0;
1276                         } else {
1277                                 p->des2 = 0;
1278                         }
1279
1280                         tx_q->tx_skbuff_dma[i].buf = 0;
1281                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1282                         tx_q->tx_skbuff_dma[i].len = 0;
1283                         tx_q->tx_skbuff_dma[i].last_segment = false;
1284                         tx_q->tx_skbuff[i] = NULL;
1285                 }
1286
1287                 tx_q->dirty_tx = 0;
1288                 tx_q->cur_tx = 0;
1289         }
1290
1291         netdev_reset_queue(priv->dev);
1292
1293         return 0;
1294 }
1295
1296 /**
1297  * init_dma_desc_rings - init the RX/TX descriptor rings
1298  * @dev: net device structure
1299  * @flags: gfp flag.
1300  * Description: this function initializes the DMA RX/TX descriptors
1301  * and allocates the socket buffers. It supports the chained and ring
1302  * modes.
1303  */
1304 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1305 {
1306         struct stmmac_priv *priv = netdev_priv(dev);
1307         int ret;
1308
1309         ret = init_dma_rx_desc_rings(dev, flags);
1310         if (ret)
1311                 return ret;
1312
1313         ret = init_dma_tx_desc_rings(dev);
1314
1315         stmmac_clear_descriptors(priv);
1316
1317         if (netif_msg_hw(priv))
1318                 stmmac_display_rings(priv);
1319
1320         return ret;
1321 }
1322
1323 /**
1324  * dma_free_rx_skbufs - free RX dma buffers
1325  * @priv: private structure
1326  * @queue: RX queue index
1327  */
1328 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1329 {
1330         int i;
1331
1332         for (i = 0; i < DMA_RX_SIZE; i++)
1333                 stmmac_free_rx_buffer(priv, queue, i);
1334 }
1335
1336 /**
1337  * dma_free_tx_skbufs - free TX dma buffers
1338  * @priv: private structure
1339  * @queue: TX queue index
1340  */
1341 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1342 {
1343         int i;
1344
1345         for (i = 0; i < DMA_TX_SIZE; i++)
1346                 stmmac_free_tx_buffer(priv, queue, i);
1347 }
1348
1349 /**
1350  * free_dma_rx_desc_resources - free RX dma desc resources
1351  * @priv: private structure
1352  */
1353 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1354 {
1355         u32 rx_count = priv->plat->rx_queues_to_use;
1356         u32 queue;
1357
1358         /* Free RX queue resources */
1359         for (queue = 0; queue < rx_count; queue++) {
1360                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1361
1362                 /* Release the DMA RX socket buffers */
1363                 dma_free_rx_skbufs(priv, queue);
1364
1365                 /* Free DMA regions of consistent memory previously allocated */
1366                 if (!priv->extend_desc)
1367                         dma_free_coherent(priv->device,
1368                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1369                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1370                 else
1371                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1372                                           sizeof(struct dma_extended_desc),
1373                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1374
1375                 kfree(rx_q->rx_skbuff_dma);
1376                 kfree(rx_q->rx_skbuff);
1377         }
1378 }
1379
1380 /**
1381  * free_dma_tx_desc_resources - free TX dma desc resources
1382  * @priv: private structure
1383  */
1384 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1385 {
1386         u32 tx_count = priv->plat->tx_queues_to_use;
1387         u32 queue = 0;
1388
1389         /* Free TX queue resources */
1390         for (queue = 0; queue < tx_count; queue++) {
1391                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1392
1393                 /* Release the DMA TX socket buffers */
1394                 dma_free_tx_skbufs(priv, queue);
1395
1396                 /* Free DMA regions of consistent memory previously allocated */
1397                 if (!priv->extend_desc)
1398                         dma_free_coherent(priv->device,
1399                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1400                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1401                 else
1402                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1403                                           sizeof(struct dma_extended_desc),
1404                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1405
1406                 kfree(tx_q->tx_skbuff_dma);
1407                 kfree(tx_q->tx_skbuff);
1408         }
1409 }
1410
1411 /**
1412  * alloc_dma_rx_desc_resources - alloc RX resources.
1413  * @priv: private structure
1414  * Description: according to which descriptor type is in use (extended or
1415  * basic), this function allocates the DMA descriptor rings and the
1416  * bookkeeping arrays for the RX path; the RX socket buffers themselves are
1417  * pre-allocated later, when the rings are initialized, to allow zero-copy.
1418  */
1419 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1420 {
1421         u32 rx_count = priv->plat->rx_queues_to_use;
1422         int ret = -ENOMEM;
1423         u32 queue;
1424
1425         /* RX queues buffers and DMA */
1426         for (queue = 0; queue < rx_count; queue++) {
1427                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1428
1429                 rx_q->queue_index = queue;
1430                 rx_q->priv_data = priv;
1431
1432                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1433                                                     sizeof(dma_addr_t),
1434                                                     GFP_KERNEL);
1435                 if (!rx_q->rx_skbuff_dma)
1436                         return -ENOMEM;
1437
1438                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1439                                                 sizeof(struct sk_buff *),
1440                                                 GFP_KERNEL);
1441                 if (!rx_q->rx_skbuff)
1442                         goto err_dma;
1443
1444                 if (priv->extend_desc) {
1445                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1446                                                             DMA_RX_SIZE *
1447                                                             sizeof(struct
1448                                                             dma_extended_desc),
1449                                                             &rx_q->dma_rx_phy,
1450                                                             GFP_KERNEL);
1451                         if (!rx_q->dma_erx)
1452                                 goto err_dma;
1453
1454                 } else {
1455                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1456                                                            DMA_RX_SIZE *
1457                                                            sizeof(struct
1458                                                            dma_desc),
1459                                                            &rx_q->dma_rx_phy,
1460                                                            GFP_KERNEL);
1461                         if (!rx_q->dma_rx)
1462                                 goto err_dma;
1463                 }
1464         }
1465
1466         return 0;
1467
1468 err_dma:
1469         free_dma_rx_desc_resources(priv);
1470
1471         return ret;
1472 }
1473
1474 /**
1475  * alloc_dma_tx_desc_resources - alloc TX resources.
1476  * @priv: private structure
1477  * Description: according to which descriptor type is in use (extended or
1478  * basic), this function allocates the DMA descriptor rings and the
1479  * bookkeeping arrays (tx_skbuff and tx_skbuff_dma) used to track the
1480  * in-flight buffers for the TX path.
1481  */
1482 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1483 {
1484         u32 tx_count = priv->plat->tx_queues_to_use;
1485         int ret = -ENOMEM;
1486         u32 queue;
1487
1488         /* TX queues buffers and DMA */
1489         for (queue = 0; queue < tx_count; queue++) {
1490                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1491
1492                 tx_q->queue_index = queue;
1493                 tx_q->priv_data = priv;
1494
1495                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1496                                                     sizeof(*tx_q->tx_skbuff_dma),
1497                                                     GFP_KERNEL);
1498                 if (!tx_q->tx_skbuff_dma)
1499                         return -ENOMEM;
1500
1501                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1502                                                 sizeof(struct sk_buff *),
1503                                                 GFP_KERNEL);
1504                 if (!tx_q->tx_skbuff)
1505                         goto err_dma_buffers;
1506
1507                 if (priv->extend_desc) {
1508                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1509                                                             DMA_TX_SIZE *
1510                                                             sizeof(struct
1511                                                             dma_extended_desc),
1512                                                             &tx_q->dma_tx_phy,
1513                                                             GFP_KERNEL);
1514                         if (!tx_q->dma_etx)
1515                                 goto err_dma_buffers;
1516                 } else {
1517                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1518                                                            DMA_TX_SIZE *
1519                                                            sizeof(struct
1520                                                                   dma_desc),
1521                                                            &tx_q->dma_tx_phy,
1522                                                            GFP_KERNEL);
1523                         if (!tx_q->dma_tx)
1524                                 goto err_dma_buffers;
1525                 }
1526         }
1527
1528         return 0;
1529
1530 err_dma_buffers:
1531         free_dma_tx_desc_resources(priv);
1532
1533         return ret;
1534 }
1535
1536 /**
1537  * alloc_dma_desc_resources - alloc TX/RX resources.
1538  * @priv: private structure
1539  * Description: according to which descriptor type is in use (extended or
1540  * basic), this function allocates the resources for the TX and RX paths.
1541  * In case of reception, for example, it pre-allocates the RX socket
1542  * buffers in order to allow the zero-copy mechanism.
1543  */
1544 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1545 {
1546         /* RX Allocation */
1547         int ret = alloc_dma_rx_desc_resources(priv);
1548
1549         if (ret)
1550                 return ret;
1551
1552         ret = alloc_dma_tx_desc_resources(priv);
1553
1554         return ret;
1555 }
1556
1557 /**
1558  * free_dma_desc_resources - free dma desc resources
1559  * @priv: private structure
1560  */
1561 static void free_dma_desc_resources(struct stmmac_priv *priv)
1562 {
1563         /* Release the DMA RX socket buffers */
1564         free_dma_rx_desc_resources(priv);
1565
1566         /* Release the DMA TX socket buffers */
1567         free_dma_tx_desc_resources(priv);
1568 }
1569
1570 /**
1571  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1572  *  @priv: driver private structure
1573  *  Description: It is used for enabling the rx queues in the MAC
1574  */
1575 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1576 {
1577         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1578         int queue;
1579         u8 mode;
1580
1581         for (queue = 0; queue < rx_queues_count; queue++) {
1582                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1583                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1584         }
1585 }
1586
1587 /**
1588  * stmmac_start_rx_dma - start RX DMA channel
1589  * @priv: driver private structure
1590  * @chan: RX channel index
1591  * Description:
1592  * This starts a RX DMA channel
1593  */
1594 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1595 {
1596         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1597         priv->hw->dma->start_rx(priv->ioaddr, chan);
1598 }
1599
1600 /**
1601  * stmmac_start_tx_dma - start TX DMA channel
1602  * @priv: driver private structure
1603  * @chan: TX channel index
1604  * Description:
1605  * This starts a TX DMA channel
1606  */
1607 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1608 {
1609         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1610         priv->hw->dma->start_tx(priv->ioaddr, chan);
1611 }
1612
1613 /**
1614  * stmmac_stop_rx_dma - stop RX DMA channel
1615  * @priv: driver private structure
1616  * @chan: RX channel index
1617  * Description:
1618  * This stops a RX DMA channel
1619  */
1620 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1621 {
1622         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1623         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1624 }
1625
1626 /**
1627  * stmmac_stop_tx_dma - stop TX DMA channel
1628  * @priv: driver private structure
1629  * @chan: TX channel index
1630  * Description:
1631  * This stops a TX DMA channel
1632  */
1633 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1634 {
1635         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1636         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1637 }
1638
1639 /**
1640  * stmmac_start_all_dma - start all RX and TX DMA channels
1641  * @priv: driver private structure
1642  * Description:
1643  * This starts all the RX and TX DMA channels
1644  */
1645 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1646 {
1647         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1648         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1649         u32 chan = 0;
1650
1651         for (chan = 0; chan < rx_channels_count; chan++)
1652                 stmmac_start_rx_dma(priv, chan);
1653
1654         for (chan = 0; chan < tx_channels_count; chan++)
1655                 stmmac_start_tx_dma(priv, chan);
1656 }
1657
1658 /**
1659  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1660  * @priv: driver private structure
1661  * Description:
1662  * This stops the RX and TX DMA channels
1663  */
1664 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1665 {
1666         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1667         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1668         u32 chan = 0;
1669
1670         for (chan = 0; chan < rx_channels_count; chan++)
1671                 stmmac_stop_rx_dma(priv, chan);
1672
1673         for (chan = 0; chan < tx_channels_count; chan++)
1674                 stmmac_stop_tx_dma(priv, chan);
1675 }
1676
1677 /**
1678  *  stmmac_dma_operation_mode - HW DMA operation mode
1679  *  @priv: driver private structure
1680  *  Description: it is used for configuring the DMA operation mode register in
1681  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1682  */
1683 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1684 {
1685         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1686         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1687         int rxfifosz = priv->plat->rx_fifo_size;
1688         u32 txmode = 0;
1689         u32 rxmode = 0;
1690         u32 chan = 0;
1691
1692         if (rxfifosz == 0)
1693                 rxfifosz = priv->dma_cap.rx_fifo_size;
1694
1695         if (priv->plat->force_thresh_dma_mode) {
1696                 txmode = tc;
1697                 rxmode = tc;
1698         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1699                 /*
1700                  * In case of GMAC, SF mode can be enabled
1701                  * to perform the TX COE in HW. This depends on:
1702                  * 1) TX COE being actually supported;
1703                  * 2) there being no bugged Jumbo frame support
1704                  *    that requires not inserting the csum in the TDES.
1705                  */
1706                 txmode = SF_DMA_MODE;
1707                 rxmode = SF_DMA_MODE;
1708                 priv->xstats.threshold = SF_DMA_MODE;
1709         } else {
1710                 txmode = tc;
1711                 rxmode = SF_DMA_MODE;
1712         }
1713
1714         /* configure all channels */
1715         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1716                 for (chan = 0; chan < rx_channels_count; chan++)
1717                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1718                                                    rxfifosz);
1719
1720                 for (chan = 0; chan < tx_channels_count; chan++)
1721                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1722         } else {
1723                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1724                                         rxfifosz);
1725         }
1726 }
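
/*
 * A rough summary of the selection above; "tc" is the threshold value used
 * for threshold mode and SF_DMA_MODE selects Store-And-Forward:
 *
 *	force_thresh_dma_mode:		txmode = tc		rxmode = tc
 *	force_sf_dma_mode or tx_coe:	txmode = SF_DMA_MODE	rxmode = SF_DMA_MODE
 *	otherwise:			txmode = tc		rxmode = SF_DMA_MODE
 */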
1727
1728 /**
1729  * stmmac_tx_clean - to manage the transmission completion
1730  * @priv: driver private structure
1731  * @queue: TX queue index
1732  * Description: it reclaims the transmit resources after transmission completes.
1733  */
1734 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1735 {
1736         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1737         unsigned int bytes_compl = 0, pkts_compl = 0;
1738         unsigned int entry = tx_q->dirty_tx;
1739
1740         netif_tx_lock(priv->dev);
1741
1742         priv->xstats.tx_clean++;
1743
1744         while (entry != tx_q->cur_tx) {
1745                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1746                 struct dma_desc *p;
1747                 int status;
1748
1749                 if (priv->extend_desc)
1750                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1751                 else
1752                         p = tx_q->dma_tx + entry;
1753
1754                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1755                                                       &priv->xstats, p,
1756                                                       priv->ioaddr);
1757                 /* Check if the descriptor is owned by the DMA */
1758                 if (unlikely(status & tx_dma_own))
1759                         break;
1760
1761                 /* Just consider the last segment and ...*/
1762                 if (likely(!(status & tx_not_ls))) {
1763                         /* ... verify the status error condition */
1764                         if (unlikely(status & tx_err)) {
1765                                 priv->dev->stats.tx_errors++;
1766                         } else {
1767                                 priv->dev->stats.tx_packets++;
1768                                 priv->xstats.tx_pkt_n++;
1769                         }
1770                         stmmac_get_tx_hwtstamp(priv, p, skb);
1771                 }
1772
1773                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1774                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1775                                 dma_unmap_page(priv->device,
1776                                                tx_q->tx_skbuff_dma[entry].buf,
1777                                                tx_q->tx_skbuff_dma[entry].len,
1778                                                DMA_TO_DEVICE);
1779                         else
1780                                 dma_unmap_single(priv->device,
1781                                                  tx_q->tx_skbuff_dma[entry].buf,
1782                                                  tx_q->tx_skbuff_dma[entry].len,
1783                                                  DMA_TO_DEVICE);
1784                         tx_q->tx_skbuff_dma[entry].buf = 0;
1785                         tx_q->tx_skbuff_dma[entry].len = 0;
1786                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1787                 }
1788
1789                 if (priv->hw->mode->clean_desc3)
1790                         priv->hw->mode->clean_desc3(tx_q, p);
1791
1792                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1793                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1794
1795                 if (likely(skb != NULL)) {
1796                         pkts_compl++;
1797                         bytes_compl += skb->len;
1798                         dev_consume_skb_any(skb);
1799                         tx_q->tx_skbuff[entry] = NULL;
1800                 }
1801
1802                 priv->hw->desc->release_tx_desc(p, priv->mode);
1803
1804                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1805         }
1806         tx_q->dirty_tx = entry;
1807
1808         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1809
1810         if (unlikely(netif_queue_stopped(priv->dev) &&
1811             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH)) {
1812                 netif_dbg(priv, tx_done, priv->dev,
1813                           "%s: restart transmit\n", __func__);
1814                 netif_wake_queue(priv->dev);
1815         }
1816
1817         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1818                 stmmac_enable_eee_mode(priv);
1819                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1820         }
1821         netif_tx_unlock(priv->dev);
1822 }
1823
1824 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1825 {
1826         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1827 }
1828
1829 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1830 {
1831         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1832 }
1833
1834 /**
1835  * stmmac_tx_err - to manage the tx error
1836  * @priv: driver private structure
1837  * @chan: channel index
1838  * Description: it cleans the descriptors and restarts the transmission
1839  * in case of transmission errors.
1840  */
1841 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1842 {
1843         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1844         int i;
1845
1846         netif_stop_queue(priv->dev);
1847
1848         stmmac_stop_tx_dma(priv, chan);
1849         dma_free_tx_skbufs(priv, chan);
1850         for (i = 0; i < DMA_TX_SIZE; i++)
1851                 if (priv->extend_desc)
1852                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1853                                                      priv->mode,
1854                                                      (i == DMA_TX_SIZE - 1));
1855                 else
1856                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1857                                                      priv->mode,
1858                                                      (i == DMA_TX_SIZE - 1));
1859         tx_q->dirty_tx = 0;
1860         tx_q->cur_tx = 0;
1861         netdev_reset_queue(priv->dev);
1862         stmmac_start_tx_dma(priv, chan);
1863
1864         priv->dev->stats.tx_errors++;
1865         netif_wake_queue(priv->dev);
1866 }
1867
1868 /**
1869  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1870  *  @priv: driver private structure
1871  *  @txmode: TX operating mode
1872  *  @rxmode: RX operating mode
1873  *  @chan: channel index
1874  *  Description: it is used for configuring the DMA operation mode at
1875  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1876  *  mode.
1877  */
1878 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1879                                           u32 rxmode, u32 chan)
1880 {
1881         int rxfifosz = priv->plat->rx_fifo_size;
1882
1883         if (rxfifosz == 0)
1884                 rxfifosz = priv->dma_cap.rx_fifo_size;
1885
1886         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1887                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1888                                            rxfifosz);
1889                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1890         } else {
1891                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1892                                         rxfifosz);
1893         }
1894 }
1895
1896 /**
1897  * stmmac_dma_interrupt - DMA ISR
1898  * @priv: driver private structure
1899  * Description: this is the DMA ISR. It is called by the main ISR.
1900  * It calls the dwmac dma routine and schedules the poll method in case
1901  * some work can be done.
1902  */
1903 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1904 {
1905         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1906         int status;
1907         u32 chan;
1908
1909         for (chan = 0; chan < tx_channel_count; chan++) {
1910                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1911                                                       &priv->xstats, chan);
1912                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1913                         if (likely(napi_schedule_prep(&priv->napi))) {
1914                                 stmmac_disable_dma_irq(priv, chan);
1915                                 __napi_schedule(&priv->napi);
1916                         }
1917                 }
1918
1919                 if (unlikely(status & tx_hard_error_bump_tc)) {
1920                         /* Try to bump up the dma threshold on this failure */
1921                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1922                             (tc <= 256)) {
1923                                 tc += 64;
1924                                 if (priv->plat->force_thresh_dma_mode)
1925                                         stmmac_set_dma_operation_mode(priv,
1926                                                                       tc,
1927                                                                       tc,
1928                                                                       chan);
1929                                 else
1930                                         stmmac_set_dma_operation_mode(priv,
1931                                                                     tc,
1932                                                                     SF_DMA_MODE,
1933                                                                     chan);
1934                                 priv->xstats.threshold = tc;
1935                         }
1936                 } else if (unlikely(status == tx_hard_error)) {
1937                         stmmac_tx_err(priv, chan);
1938                 }
1939         }
1940 }
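
/*
 * On a tx_hard_error_bump_tc interrupt the threshold is raised in steps of
 * 64 and capped at 256 (e.g. 64 -> 128 -> 192 -> 256), and the channel is
 * reprogrammed through stmmac_set_dma_operation_mode(); once the driver is
 * already in Store-And-Forward mode (xstats.threshold == SF_DMA_MODE) no
 * further bumping is attempted.
 */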
1941
1942 /**
1943  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1944  * @priv: driver private structure
1945  * Description: this masks the MMC irq since the counters are managed in SW.
1946  */
1947 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1948 {
1949         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1950                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1951
1952         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1953                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1954                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1955         } else {
1956                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1957                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1958         }
1959
1960         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1961
1962         if (priv->dma_cap.rmon) {
1963                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1964                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1965         } else
1966                 netdev_info(priv->dev, "No MAC Management Counters available\n");
1967 }
1968
1969 /**
1970  * stmmac_selec_desc_mode - to select among: normal/alternate/extended descriptors
1971  * @priv: driver private structure
1972  * Description: select the Enhanced/Alternate or Normal descriptors.
1973  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1974  * supported by the HW capability register.
1975  */
1976 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1977 {
1978         if (priv->plat->enh_desc) {
1979                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1980
1981                 /* GMAC older than 3.50 has no extended descriptors */
1982                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1983                         dev_info(priv->device, "Enabled extended descriptors\n");
1984                         priv->extend_desc = 1;
1985                 } else
1986                         dev_warn(priv->device, "Extended descriptors not supported\n");
1987
1988                 priv->hw->desc = &enh_desc_ops;
1989         } else {
1990                 dev_info(priv->device, "Normal descriptors\n");
1991                 priv->hw->desc = &ndesc_ops;
1992         }
1993 }
1994
1995 /**
1996  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1997  * @priv: driver private structure
1998  * Description:
1999  *  new GMAC chip generations have a dedicated register to indicate the
2000  *  presence of the optional features/functions.
2001  *  This can also be used to override the values passed through the
2002  *  platform, which are still necessary for old MAC10/100 and GMAC chips.
2003  */
2004 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2005 {
2006         u32 ret = 0;
2007
2008         if (priv->hw->dma->get_hw_feature) {
2009                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2010                                               &priv->dma_cap);
2011                 ret = 1;
2012         }
2013
2014         return ret;
2015 }
2016
2017 /**
2018  * stmmac_check_ether_addr - check if the MAC addr is valid
2019  * @priv: driver private structure
2020  * Description:
2021  * it verifies that the MAC address is valid; if not, the address is read
2022  * from the HW and, failing that, a random MAC address is generated.
2023  */
2024 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2025 {
2026         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2027                 priv->hw->mac->get_umac_addr(priv->hw,
2028                                              priv->dev->dev_addr, 0);
2029                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2030                         eth_hw_addr_random(priv->dev);
2031                 netdev_info(priv->dev, "device MAC address %pM\n",
2032                             priv->dev->dev_addr);
2033         }
2034 }
2035
2036 /**
2037  * stmmac_init_dma_engine - DMA init.
2038  * @priv: driver private structure
2039  * Description:
2040  * It inits the DMA invoking the specific MAC/GMAC callback.
2041  * Some DMA parameters can be passed from the platform;
2042  * in case these are not passed, a default is kept for the MAC or GMAC.
2043  */
2044 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2045 {
2046         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2047         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2048         struct stmmac_rx_queue *rx_q;
2049         struct stmmac_tx_queue *tx_q;
2050         u32 dummy_dma_rx_phy = 0;
2051         u32 dummy_dma_tx_phy = 0;
2052         u32 chan = 0;
2053         int atds = 0;
2054         int ret = 0;
2055
2056         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2057                 dev_err(priv->device, "Invalid DMA configuration\n");
2058                 return -EINVAL;
2059         }
2060
2061         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2062                 atds = 1;
2063
2064         ret = priv->hw->dma->reset(priv->ioaddr);
2065         if (ret) {
2066                 dev_err(priv->device, "Failed to reset the dma\n");
2067                 return ret;
2068         }
2069
2070         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2071                 /* DMA Configuration */
2072                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2073                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2074
2075                 /* DMA RX Channel Configuration */
2076                 for (chan = 0; chan < rx_channels_count; chan++) {
2077                         rx_q = &priv->rx_queue[chan];
2078
2079                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2080                                                     priv->plat->dma_cfg,
2081                                                     rx_q->dma_rx_phy, chan);
2082
2083                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2084                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2085                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2086                                                        rx_q->rx_tail_addr,
2087                                                        chan);
2088                 }
2089
2090                 /* DMA TX Channel Configuration */
2091                 for (chan = 0; chan < tx_channels_count; chan++) {
2092                         tx_q = &priv->tx_queue[chan];
2093
2094                         priv->hw->dma->init_chan(priv->ioaddr,
2095                                                  priv->plat->dma_cfg,
2096                                                  chan);
2097
2098                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2099                                                     priv->plat->dma_cfg,
2100                                                     tx_q->dma_tx_phy, chan);
2101
2102                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2103                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2104                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2105                                                        tx_q->tx_tail_addr,
2106                                                        chan);
2107                 }
2108         } else {
2109                 rx_q = &priv->rx_queue[chan];
2110                 tx_q = &priv->tx_queue[chan];
2111                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2112                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2113         }
2114
2115         if (priv->plat->axi && priv->hw->dma->axi)
2116                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2117
2118         return ret;
2119 }
2120
2121 /**
2122  * stmmac_tx_timer - mitigation sw timer for tx.
2123  * @data: data pointer
2124  * Description:
2125  * This is the timer handler to directly invoke the stmmac_tx_clean.
2126  */
2127 static void stmmac_tx_timer(unsigned long data)
2128 {
2129         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2130         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2131         u32 queue;
2132
2133         /* let's scan all the tx queues */
2134         for (queue = 0; queue < tx_queues_count; queue++)
2135                 stmmac_tx_clean(priv, queue);
2136 }
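
/*
 * Note that this timer is (re)armed from the transmit path as part of the
 * coalescing logic (see the tx_coal_frames/tx_count_frames handling in
 * stmmac_tso_xmit() below), so the SW clean-up mainly covers frames that
 * were queued without the interrupt-on-completion bit set.
 */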
2137
2138 /**
2139  * stmmac_init_tx_coalesce - init tx mitigation options.
2140  * @priv: driver private structure
2141  * Description:
2142  * This inits the transmit coalesce parameters: i.e. timer rate,
2143  * timer handler and default threshold used for enabling the
2144  * interrupt on completion bit.
2145  */
2146 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2147 {
2148         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2149         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2150         init_timer(&priv->txtimer);
2151         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2152         priv->txtimer.data = (unsigned long)priv;
2153         priv->txtimer.function = stmmac_tx_timer;
2154         add_timer(&priv->txtimer);
2155 }
2156
2157 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2158 {
2159         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2160         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2161         u32 chan;
2162
2163         /* set TX ring length */
2164         if (priv->hw->dma->set_tx_ring_len) {
2165                 for (chan = 0; chan < tx_channels_count; chan++)
2166                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2167                                                        (DMA_TX_SIZE - 1), chan);
2168         }
2169
2170         /* set RX ring length */
2171         if (priv->hw->dma->set_rx_ring_len) {
2172                 for (chan = 0; chan < rx_channels_count; chan++)
2173                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2174                                                        (DMA_RX_SIZE - 1), chan);
2175         }
2176 }
2177
2178 /**
2179  *  stmmac_set_tx_queue_weight - Set TX queue weight
2180  *  @priv: driver private structure
2181  *  Description: It is used for setting the TX queue weights
2182  */
2183 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2184 {
2185         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2186         u32 weight;
2187         u32 queue;
2188
2189         for (queue = 0; queue < tx_queues_count; queue++) {
2190                 weight = priv->plat->tx_queues_cfg[queue].weight;
2191                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2192         }
2193 }
2194
2195 /**
2196  *  stmmac_configure_cbs - Configure CBS in TX queue
2197  *  @priv: driver private structure
2198  *  Description: It is used for configuring CBS in AVB TX queues
2199  */
2200 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2201 {
2202         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2203         u32 mode_to_use;
2204         u32 queue;
2205
2206         /* queue 0 is reserved for legacy traffic */
2207         for (queue = 1; queue < tx_queues_count; queue++) {
2208                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2209                 if (mode_to_use == MTL_QUEUE_DCB)
2210                         continue;
2211
2212                 priv->hw->mac->config_cbs(priv->hw,
2213                                 priv->plat->tx_queues_cfg[queue].send_slope,
2214                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2215                                 priv->plat->tx_queues_cfg[queue].high_credit,
2216                                 priv->plat->tx_queues_cfg[queue].low_credit,
2217                                 queue);
2218         }
2219 }
2220
2221 /**
2222  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2223  *  @priv: driver private structure
2224  *  Description: It is used for mapping RX queues to RX dma channels
2225  */
2226 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2227 {
2228         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2229         u32 queue;
2230         u32 chan;
2231
2232         for (queue = 0; queue < rx_queues_count; queue++) {
2233                 chan = priv->plat->rx_queues_cfg[queue].chan;
2234                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2235         }
2236 }
2237
2238 /**
2239  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2240  *  @priv: driver private structure
2241  *  Description: It is used for configuring the RX Queue Priority
2242  */
2243 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2244 {
2245         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2246         u32 queue;
2247         u32 prio;
2248
2249         for (queue = 0; queue < rx_queues_count; queue++) {
2250                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2251                         continue;
2252
2253                 prio = priv->plat->rx_queues_cfg[queue].prio;
2254                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2255         }
2256 }
2257
2258 /**
2259  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2260  *  @priv: driver private structure
2261  *  Description: It is used for configuring the TX Queue Priority
2262  */
2263 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2264 {
2265         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2266         u32 queue;
2267         u32 prio;
2268
2269         for (queue = 0; queue < tx_queues_count; queue++) {
2270                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2271                         continue;
2272
2273                 prio = priv->plat->tx_queues_cfg[queue].prio;
2274                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2275         }
2276 }
2277
2278 /**
2279  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2280  *  @priv: driver private structure
2281  *  Description: It is used for configuring the RX queue routing
2282  */
2283 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2284 {
2285         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2286         u32 queue;
2287         u8 packet;
2288
2289         for (queue = 0; queue < rx_queues_count; queue++) {
2290                 /* no specific packet type routing specified for the queue */
2291                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2292                         continue;
2293
2294                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2295                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2296         }
2297 }
2298
2299 /**
2300  *  stmmac_mtl_configuration - Configure MTL
2301  *  @priv: driver private structure
2302  *  Description: It is used for configuring the MTL
2303  */
2304 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2305 {
2306         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2307         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2308
2309         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2310                 stmmac_set_tx_queue_weight(priv);
2311
2312         /* Configure MTL RX algorithms */
2313         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2314                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2315                                                 priv->plat->rx_sched_algorithm);
2316
2317         /* Configure MTL TX algorithms */
2318         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2319                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2320                                                 priv->plat->tx_sched_algorithm);
2321
2322         /* Configure CBS in AVB TX queues */
2323         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2324                 stmmac_configure_cbs(priv);
2325
2326         /* Map RX MTL to DMA channels */
2327         if (priv->hw->mac->map_mtl_to_dma)
2328                 stmmac_rx_queue_dma_chan_map(priv);
2329
2330         /* Enable MAC RX Queues */
2331         if (priv->hw->mac->rx_queue_enable)
2332                 stmmac_mac_enable_rx_queues(priv);
2333
2334         /* Set RX priorities */
2335         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2336                 stmmac_mac_config_rx_queues_prio(priv);
2337
2338         /* Set TX priorities */
2339         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2340                 stmmac_mac_config_tx_queues_prio(priv);
2341
2342         /* Set RX routing */
2343         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2344                 stmmac_mac_config_rx_queues_routing(priv);
2345 }
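
/*
 * A minimal, purely illustrative example of the platform data consumed by
 * the MTL configuration above (the values are made up; only fields
 * referenced in this file are shown), for a setup with two RX and two TX
 * queues:
 *
 *	plat->rx_queues_to_use = 2;
 *	plat->tx_queues_to_use = 2;
 *	plat->rx_queues_cfg[1].chan = 1;	// map RX queue 1 to DMA channel 1
 *	plat->rx_queues_cfg[1].use_prio = true;
 *	plat->rx_queues_cfg[1].prio = 2;
 *	plat->tx_queues_cfg[1].weight = 0x10;	// only used if the callback exists
 */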
2346
2347 /**
2348  * stmmac_hw_setup - setup mac in a usable state.
2349  *  @dev : pointer to the device structure.
2350  *  @dev : pointer to the device structure.
2351  *  @init_ptp: if true, the PTP reference clock and support are also initialized
2352  *  Description:
2353  *  this is the main function to setup the HW in a usable state: the DMA
2354  *  engine is reset, the core registers are configured (e.g. AXI, Checksum
2355  *  features, timers) and the DMA is made ready to start receiving and transmitting.
2356  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2357  *  file on failure.
2358  */
2359 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2360 {
2361         struct stmmac_priv *priv = netdev_priv(dev);
2362         u32 rx_cnt = priv->plat->rx_queues_to_use;
2363         u32 tx_cnt = priv->plat->tx_queues_to_use;
2364         u32 chan;
2365         int ret;
2366
2367         /* DMA initialization and SW reset */
2368         ret = stmmac_init_dma_engine(priv);
2369         if (ret < 0) {
2370                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2371                            __func__);
2372                 return ret;
2373         }
2374
2375         /* Copy the MAC addr into the HW  */
2376         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2377
2378         /* PS and related bits will be programmed according to the speed */
2379         if (priv->hw->pcs) {
2380                 int speed = priv->plat->mac_port_sel_speed;
2381
2382                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2383                     (speed == SPEED_1000)) {
2384                         priv->hw->ps = speed;
2385                 } else {
2386                         dev_warn(priv->device, "invalid port speed\n");
2387                         priv->hw->ps = 0;
2388                 }
2389         }
2390
2391         /* Initialize the MAC Core */
2392         priv->hw->mac->core_init(priv->hw, dev->mtu);
2393
2394         /* Initialize MTL*/
2395         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2396                 stmmac_mtl_configuration(priv);
2397
2398         ret = priv->hw->mac->rx_ipc(priv->hw);
2399         if (!ret) {
2400                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2401                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2402                 priv->hw->rx_csum = 0;
2403         }
2404
2405         /* Enable the MAC Rx/Tx */
2406         priv->hw->mac->set_mac(priv->ioaddr, true);
2407
2408         /* Set the HW DMA mode and the COE */
2409         stmmac_dma_operation_mode(priv);
2410
2411         stmmac_mmc_setup(priv);
2412
2413         if (init_ptp) {
2414                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2415                 if (ret < 0)
2416                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2417
2418                 ret = stmmac_init_ptp(priv);
2419                 if (ret == -EOPNOTSUPP)
2420                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2421                 else if (ret)
2422                         netdev_warn(priv->dev, "PTP init failed\n");
2423         }
2424
2425 #ifdef CONFIG_DEBUG_FS
2426         ret = stmmac_init_fs(dev);
2427         if (ret < 0)
2428                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2429                             __func__);
2430 #endif
2431         /* Start the ball rolling... */
2432         stmmac_start_all_dma(priv);
2433
2434         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2435
2436         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2437                 priv->rx_riwt = MAX_DMA_RIWT;
2438                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2439         }
2440
2441         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2442                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2443
2444         /* set TX and RX rings length */
2445         stmmac_set_rings_length(priv);
2446
2447         /* Enable TSO */
2448         if (priv->tso) {
2449                 for (chan = 0; chan < tx_cnt; chan++)
2450                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2451         }
2452
2453         return 0;
2454 }
2455
2456 static void stmmac_hw_teardown(struct net_device *dev)
2457 {
2458         struct stmmac_priv *priv = netdev_priv(dev);
2459
2460         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2461 }
2462
2463 /**
2464  *  stmmac_open - open entry point of the driver
2465  *  @dev : pointer to the device structure.
2466  *  Description:
2467  *  This function is the open entry point of the driver.
2468  *  Return value:
2469  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2470  *  file on failure.
2471  */
2472 static int stmmac_open(struct net_device *dev)
2473 {
2474         struct stmmac_priv *priv = netdev_priv(dev);
2475         int ret;
2476
2477         stmmac_check_ether_addr(priv);
2478
2479         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2480             priv->hw->pcs != STMMAC_PCS_TBI &&
2481             priv->hw->pcs != STMMAC_PCS_RTBI) {
2482                 ret = stmmac_init_phy(dev);
2483                 if (ret) {
2484                         netdev_err(priv->dev,
2485                                    "%s: Cannot attach to PHY (error: %d)\n",
2486                                    __func__, ret);
2487                         return ret;
2488                 }
2489         }
2490
2491         /* Extra statistics */
2492         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2493         priv->xstats.threshold = tc;
2494
2495         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2496         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2497
2498         ret = alloc_dma_desc_resources(priv);
2499         if (ret < 0) {
2500                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2501                            __func__);
2502                 goto dma_desc_error;
2503         }
2504
2505         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2506         if (ret < 0) {
2507                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2508                            __func__);
2509                 goto init_error;
2510         }
2511
2512         ret = stmmac_hw_setup(dev, true);
2513         if (ret < 0) {
2514                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2515                 goto init_error;
2516         }
2517
2518         stmmac_init_tx_coalesce(priv);
2519
2520         if (dev->phydev)
2521                 phy_start(dev->phydev);
2522
2523         /* Request the IRQ lines */
2524         ret = request_irq(dev->irq, stmmac_interrupt,
2525                           IRQF_SHARED, dev->name, dev);
2526         if (unlikely(ret < 0)) {
2527                 netdev_err(priv->dev,
2528                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2529                            __func__, dev->irq, ret);
2530                 goto irq_error;
2531         }
2532
2533         /* Request the Wake IRQ in case another line is used for WoL */
2534         if (priv->wol_irq != dev->irq) {
2535                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2536                                   IRQF_SHARED, dev->name, dev);
2537                 if (unlikely(ret < 0)) {
2538                         netdev_err(priv->dev,
2539                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2540                                    __func__, priv->wol_irq, ret);
2541                         goto wolirq_error;
2542                 }
2543         }
2544
2545         /* Request the LPI IRQ in case a separate line is used */
2546         if (priv->lpi_irq > 0) {
2547                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2548                                   dev->name, dev);
2549                 if (unlikely(ret < 0)) {
2550                         netdev_err(priv->dev,
2551                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2552                                    __func__, priv->lpi_irq, ret);
2553                         goto lpiirq_error;
2554                 }
2555         }
2556
2557         napi_enable(&priv->napi);
2558         netif_start_queue(dev);
2559
2560         return 0;
2561
2562 lpiirq_error:
2563         if (priv->wol_irq != dev->irq)
2564                 free_irq(priv->wol_irq, dev);
2565 wolirq_error:
2566         free_irq(dev->irq, dev);
2567 irq_error:
2568         if (dev->phydev)
2569                 phy_stop(dev->phydev);
2570
2571         del_timer_sync(&priv->txtimer);
2572         stmmac_hw_teardown(dev);
2573 init_error:
2574         free_dma_desc_resources(priv);
2575 dma_desc_error:
2576         if (dev->phydev)
2577                 phy_disconnect(dev->phydev);
2578
2579         return ret;
2580 }
2581
2582 /**
2583  *  stmmac_release - close entry point of the driver
2584  *  @dev : device pointer.
2585  *  Description:
2586  *  This is the stop entry point of the driver.
2587  */
2588 static int stmmac_release(struct net_device *dev)
2589 {
2590         struct stmmac_priv *priv = netdev_priv(dev);
2591
2592         if (priv->eee_enabled)
2593                 del_timer_sync(&priv->eee_ctrl_timer);
2594
2595         /* Stop and disconnect the PHY */
2596         if (dev->phydev) {
2597                 phy_stop(dev->phydev);
2598                 phy_disconnect(dev->phydev);
2599         }
2600
2601         netif_stop_queue(dev);
2602
2603         napi_disable(&priv->napi);
2604
2605         del_timer_sync(&priv->txtimer);
2606
2607         /* Free the IRQ lines */
2608         free_irq(dev->irq, dev);
2609         if (priv->wol_irq != dev->irq)
2610                 free_irq(priv->wol_irq, dev);
2611         if (priv->lpi_irq > 0)
2612                 free_irq(priv->lpi_irq, dev);
2613
2614         /* Stop TX/RX DMA and clear the descriptors */
2615         stmmac_stop_all_dma(priv);
2616
2617         /* Release and free the Rx/Tx resources */
2618         free_dma_desc_resources(priv);
2619
2620         /* Disable the MAC Rx/Tx */
2621         priv->hw->mac->set_mac(priv->ioaddr, false);
2622
2623         netif_carrier_off(dev);
2624
2625 #ifdef CONFIG_DEBUG_FS
2626         stmmac_exit_fs(dev);
2627 #endif
2628
2629         stmmac_release_ptp(priv);
2630
2631         return 0;
2632 }
2633
2634 /**
2635  *  stmmac_tso_allocator - allocate and fill TSO descriptors for a buffer
2636  *  @priv: driver private structure
2637  *  @des: buffer start address
2638  *  @total_len: total length to fill in descriptors
2639  *  @last_segment: condition for the last descriptor
2640  *  @queue: TX queue index
2641  *  Description:
2642  *  This function fills the descriptors and requests new descriptors according
2643  *  to the buffer length to fill.
2644  */
2645 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2646                                  int total_len, bool last_segment, u32 queue)
2647 {
2648         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2649         struct dma_desc *desc;
2650         u32 buff_size;
2651         int tmp_len;
2652
2653         tmp_len = total_len;
2654
2655         while (tmp_len > 0) {
2656                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2657                 desc = tx_q->dma_tx + tx_q->cur_tx;
2658
2659                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2660                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2661                             TSO_MAX_BUFF_SIZE : tmp_len;
2662
2663                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2664                         0, 1,
2665                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2666                         0, 0);
2667
2668                 tmp_len -= TSO_MAX_BUFF_SIZE;
2669         }
2670 }
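
/*
 * For example, a 40000 byte payload split with a 16383 byte per-descriptor
 * cap (TSO_MAX_BUFF_SIZE) consumes three descriptors of 16383, 16383 and
 * 7234 bytes; only the last one can have the last_segment flag set, since
 * its buffer size is below the cap.
 */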
2671
2672 /**
2673  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2674  *  @skb : the socket buffer
2675  *  @dev : device pointer
2676  *  Description: this is the transmit function that is called on TSO frames
2677  *  (support available on GMAC4 and newer chips).
2678  *  The diagram below shows the ring programming in case of TSO frames:
2679  *
2680  *  First Descriptor
2681  *   --------
2682  *   | DES0 |---> buffer1 = L2/L3/L4 header
2683  *   | DES1 |---> TCP Payload (can continue on next descr...)
2684  *   | DES2 |---> buffer 1 and 2 len
2685  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2686  *   --------
2687  *      |
2688  *     ...
2689  *      |
2690  *   --------
2691  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2692  *   | DES1 | --|
2693  *   | DES2 | --> buffer 1 and 2 len
2694  *   | DES3 |
2695  *   --------
2696  *
2697  * mss is fixed while TSO is enabled, so the TDES3 ctx field only needs to be programmed when it changes.
2698  */
2699 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2700 {
2701         struct dma_desc *desc, *first, *mss_desc = NULL;
2702         struct stmmac_priv *priv = netdev_priv(dev);
2703         int nfrags = skb_shinfo(skb)->nr_frags;
2704         u32 queue = skb_get_queue_mapping(skb);
2705         unsigned int first_entry, des;
2706         struct stmmac_tx_queue *tx_q;
2707         int tmp_pay_len = 0;
2708         u32 pay_len, mss;
2709         u8 proto_hdr_len;
2710         int i;
2711
2712         tx_q = &priv->tx_queue[queue];
2713
2714         /* Compute header lengths */
2715         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2716
2717         /* Desc availability based on threshold should be safe enough */
2718         if (unlikely(stmmac_tx_avail(priv, queue) <
2719                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2720                 if (!netif_queue_stopped(dev)) {
2721                         netif_stop_queue(dev);
2722                         /* This is a hard error, log it. */
2723                         netdev_err(priv->dev,
2724                                    "%s: Tx Ring full when queue awake\n",
2725                                    __func__);
2726                 }
2727                 return NETDEV_TX_BUSY;
2728         }
2729
2730         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2731
2732         mss = skb_shinfo(skb)->gso_size;
2733
2734         /* set new MSS value if needed */
2735         if (mss != priv->mss) {
2736                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2737                 priv->hw->desc->set_mss(mss_desc, mss);
2738                 priv->mss = mss;
2739                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2740         }
2741
2742         if (netif_msg_tx_queued(priv)) {
2743                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2744                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2745                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2746                         skb->data_len);
2747         }
2748
2749         first_entry = tx_q->cur_tx;
2750
2751         desc = tx_q->dma_tx + first_entry;
2752         first = desc;
2753
2754         /* first descriptor: fill Headers on Buf1 */
2755         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2756                              DMA_TO_DEVICE);
2757         if (dma_mapping_error(priv->device, des))
2758                 goto dma_map_err;
2759
2760         tx_q->tx_skbuff_dma[first_entry].buf = des;
2761         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2762         tx_q->tx_skbuff[first_entry] = skb;
2763
2764         first->des0 = cpu_to_le32(des);
2765
2766         /* Fill start of payload in buff2 of first descriptor */
2767         if (pay_len)
2768                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2769
2770         /* If needed take extra descriptors to fill the remaining payload */
2771         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2772
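             /* stmmac_tso_allocator() takes extra descriptors for the rest of
              * the linear payload, splitting it into chunks of at most
              * TSO_MAX_BUFF_SIZE bytes each.
              */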
2773         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2774
2775         /* Prepare fragments */
2776         for (i = 0; i < nfrags; i++) {
2777                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2778
2779                 des = skb_frag_dma_map(priv->device, frag, 0,
2780                                        skb_frag_size(frag),
2781                                        DMA_TO_DEVICE);
2782                 if (dma_mapping_error(priv->device, des))
2783                         goto dma_map_err;
2784
2785                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2786                                      (i == nfrags - 1), queue);
2787
2788                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2789                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2790                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2791                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2792         }
2793
2794         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2795
2796         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2797
2798         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2799                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2800                           __func__);
2801                 netif_stop_queue(dev);
2802         }
2803
2804         dev->stats.tx_bytes += skb->len;
2805         priv->xstats.tx_tso_frames++;
2806         priv->xstats.tx_tso_nfrags += nfrags;
2807
2808         /* Manage tx mitigation */
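             /* Request a Tx-complete interrupt (IC bit) only once every
              * tx_coal_frames descriptors; otherwise just re-arm the coalescing
              * timer so the Tx ring still gets cleaned without an interrupt.
              */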
2809         priv->tx_count_frames += nfrags + 1;
2810         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2811                 mod_timer(&priv->txtimer,
2812                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2813         } else {
2814                 priv->tx_count_frames = 0;
2815                 priv->hw->desc->set_tx_ic(desc);
2816                 priv->xstats.tx_set_ic_bit++;
2817         }
2818
2819         if (!priv->hwts_tx_en)
2820                 skb_tx_timestamp(skb);
2821
2822         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2823                      priv->hwts_tx_en)) {
2824                 /* declare that device is doing timestamping */
2825                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2826                 priv->hw->desc->enable_tx_timestamp(first);
2827         }
2828
2829         /* Complete the first descriptor before granting the DMA */
2830         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2831                         proto_hdr_len,
2832                         pay_len,
2833                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2834                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2835
2836         /* If context desc is used to change MSS */
2837         if (mss_desc)
2838                 priv->hw->desc->set_tx_owner(mss_desc);
2839
2840         /* The own bit must be the last setting done when preparing the
2841          * descriptor, and a barrier is needed to make sure that
2842          * everything is coherent before granting ownership to the DMA engine.
2843          */
2844         dma_wmb();
2845
2846         if (netif_msg_pktdata(priv)) {
2847                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2848                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2849                         tx_q->cur_tx, first, nfrags);
2850
2851                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2852                                              0);
2853
2854                 pr_info(">>> frame to be transmitted: ");
2855                 print_pkt(skb->data, skb_headlen(skb));
2856         }
2857
2858         netdev_sent_queue(dev, skb->len);
2859
2860         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2861                                        queue);
2862
2863         return NETDEV_TX_OK;
2864
2865 dma_map_err:
2866         dev_err(priv->device, "Tx dma map failed\n");
2867         dev_kfree_skb(skb);
2868         priv->dev->stats.tx_dropped++;
2869         return NETDEV_TX_OK;
2870 }
2871
2872 /**
2873  *  stmmac_xmit - Tx entry point of the driver
2874  *  @skb : the socket buffer
2875  *  @dev : device pointer
2876  *  Description : this is the tx entry point of the driver.
2877  *  It programs the chain or the ring and supports oversized frames
2878  *  and SG feature.
2879  */
2880 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2881 {
2882         struct stmmac_priv *priv = netdev_priv(dev);
2883         unsigned int nopaged_len = skb_headlen(skb);
2884         int i, csum_insertion = 0, is_jumbo = 0;
2885         u32 queue = skb_get_queue_mapping(skb);
2886         int nfrags = skb_shinfo(skb)->nr_frags;
2887         unsigned int entry, first_entry;
2888         struct dma_desc *desc, *first;
2889         struct stmmac_tx_queue *tx_q;
2890         unsigned int enh_desc;
2891         unsigned int des;
2892
2893         tx_q = &priv->tx_queue[queue];
2894
2895         /* Manage oversized TCP frames for GMAC4 device */
2896         if (skb_is_gso(skb) && priv->tso) {
2897                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2898                         return stmmac_tso_xmit(skb, dev);
2899         }
2900
2901         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2902                 if (!netif_queue_stopped(dev)) {
2903                         netif_stop_queue(dev);
2904                         /* This is a hard error, log it. */
2905                         netdev_err(priv->dev,
2906                                    "%s: Tx Ring full when queue awake\n",
2907                                    __func__);
2908                 }
2909                 return NETDEV_TX_BUSY;
2910         }
2911
2912         if (priv->tx_path_in_lpi_mode)
2913                 stmmac_disable_eee_mode(priv);
2914
2915         entry = tx_q->cur_tx;
2916         first_entry = entry;
2917
2918         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2919
2920         if (likely(priv->extend_desc))
2921                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2922         else
2923                 desc = tx_q->dma_tx + entry;
2924
2925         first = desc;
2926
2927         tx_q->tx_skbuff[first_entry] = skb;
2928
2929         enh_desc = priv->plat->enh_desc;
2930         /* To program the descriptors according to the size of the frame */
2931         if (enh_desc)
2932                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2933
2934         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2935                                          DWMAC_CORE_4_00)) {
2936                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
2937                 if (unlikely(entry < 0))
2938                         goto dma_map_err;
2939         }
2940
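             /* Map each page fragment into its own descriptor. The OWN bit is
              * set right away for these descriptors, while the first descriptor
              * is completed last (see below) so the DMA never sees a partial
              * frame.
              */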
2941         for (i = 0; i < nfrags; i++) {
2942                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2943                 int len = skb_frag_size(frag);
2944                 bool last_segment = (i == (nfrags - 1));
2945
2946                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2947
2948                 if (likely(priv->extend_desc))
2949                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2950                 else
2951                         desc = tx_q->dma_tx + entry;
2952
2953                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2954                                        DMA_TO_DEVICE);
2955                 if (dma_mapping_error(priv->device, des))
2956                         goto dma_map_err; /* should reuse desc w/o issues */
2957
2958                 tx_q->tx_skbuff[entry] = NULL;
2959
2960                 tx_q->tx_skbuff_dma[entry].buf = des;
2961                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2962                         desc->des0 = cpu_to_le32(des);
2963                 else
2964                         desc->des2 = cpu_to_le32(des);
2965
2966                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
2967                 tx_q->tx_skbuff_dma[entry].len = len;
2968                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
2969
2970                 /* Prepare the descriptor and set the own bit too */
2971                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2972                                                 priv->mode, 1, last_segment);
2973         }
2974
2975         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2976
2977         tx_q->cur_tx = entry;
2978
2979         if (netif_msg_pktdata(priv)) {
2980                 void *tx_head;
2981
2982                 netdev_dbg(priv->dev,
2983                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2984                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2985                            entry, first, nfrags);
2986
2987                 if (priv->extend_desc)
2988                         tx_head = (void *)tx_q->dma_etx;
2989                 else
2990                         tx_head = (void *)tx_q->dma_tx;
2991
2992                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2993
2994                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2995                 print_pkt(skb->data, skb->len);
2996         }
2997
2998         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2999                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3000                           __func__);
3001                 netif_stop_queue(dev);
3002         }
3003
3004         dev->stats.tx_bytes += skb->len;
3005
3006         /* According to the coalesce parameter, the IC bit for the latest
3007          * segment is reset and the timer is restarted to clean up the tx status.
3008          * This approach takes the fragments into account: desc is the first
3009          * element when there is no SG.
3010          */
3011         priv->tx_count_frames += nfrags + 1;
3012         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3013                 mod_timer(&priv->txtimer,
3014                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3015         } else {
3016                 priv->tx_count_frames = 0;
3017                 priv->hw->desc->set_tx_ic(desc);
3018                 priv->xstats.tx_set_ic_bit++;
3019         }
3020
3021         if (!priv->hwts_tx_en)
3022                 skb_tx_timestamp(skb);
3023
3024         /* Ready to fill the first descriptor and set the OWN bit without any
3025          * problem, because all the other descriptors are already ready to be
3026          * passed to the DMA engine.
3027          */
3028         if (likely(!is_jumbo)) {
3029                 bool last_segment = (nfrags == 0);
3030
3031                 des = dma_map_single(priv->device, skb->data,
3032                                      nopaged_len, DMA_TO_DEVICE);
3033                 if (dma_mapping_error(priv->device, des))
3034                         goto dma_map_err;
3035
3036                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3037                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3038                         first->des0 = cpu_to_le32(des);
3039                 else
3040                         first->des2 = cpu_to_le32(des);
3041
3042                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3043                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3044
3045                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3046                              priv->hwts_tx_en)) {
3047                         /* declare that device is doing timestamping */
3048                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3049                         priv->hw->desc->enable_tx_timestamp(first);
3050                 }
3051
3052                 /* Prepare the first descriptor setting the OWN bit too */
3053                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3054                                                 csum_insertion, priv->mode, 1,
3055                                                 last_segment);
3056
3057                 /* The own bit must be the last setting done when preparing the
3058                  * descriptor, and a barrier is needed to make sure that
3059                  * everything is coherent before granting ownership to the DMA engine.
3060                  */
3061                 dma_wmb();
3062         }
3063
3064         netdev_sent_queue(dev, skb->len);
3065
3066         if (priv->synopsys_id < DWMAC_CORE_4_00)
3067                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3068         else
3069                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3070                                                queue);
3071
3072         return NETDEV_TX_OK;
3073
3074 dma_map_err:
3075         netdev_err(priv->dev, "Tx DMA map failed\n");
3076         dev_kfree_skb(skb);
3077         priv->dev->stats.tx_dropped++;
3078         return NETDEV_TX_OK;
3079 }
3080
3081 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3082 {
3083         struct ethhdr *ehdr;
3084         u16 vlanid;
3085
3086         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3087             NETIF_F_HW_VLAN_CTAG_RX &&
3088             !__vlan_get_tag(skb, &vlanid)) {
3089                 /* pop the vlan tag */
3090                 ehdr = (struct ethhdr *)skb->data;
3091                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3092                 skb_pull(skb, VLAN_HLEN);
3093                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3094         }
3095 }
3096
3097
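     /* Returns 1 once rx_zeroc_thresh has reached STMMAC_RX_THRESH (e.g. after
      * skb allocation failures in stmmac_rx_refill()); stmmac_rx() then copies
      * frames instead of handing out the preallocated zero-copy buffers.
      */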
3098 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3099 {
3100         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3101                 return 0;
3102
3103         return 1;
3104 }
3105
3106 /**
3107  * stmmac_rx_refill - refill used skb preallocated buffers
3108  * @priv: driver private structure
3109  * @queue: RX queue index
3110  * Description : this reallocates the skbs for the zero-copy based
3111  * reception process.
3112  */
3113 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3114 {
3115         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3116         int dirty = stmmac_rx_dirty(priv, queue);
3117         unsigned int entry = rx_q->dirty_rx;
3118
3119         int bfsize = priv->dma_buf_sz;
3120
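             /* For each descriptor already consumed by the hardware, allocate
              * and map a fresh skb, then hand the descriptor back to the DMA
              * engine by setting its own bit.
              */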
3121         while (dirty-- > 0) {
3122                 struct dma_desc *p;
3123
3124                 if (priv->extend_desc)
3125                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3126                 else
3127                         p = rx_q->dma_rx + entry;
3128
3129                 if (likely(!rx_q->rx_skbuff[entry])) {
3130                         struct sk_buff *skb;
3131
3132                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3133                         if (unlikely(!skb)) {
3134                                 /* so for a while no zero-copy! */
3135                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3136                                 if (unlikely(net_ratelimit()))
3137                                         dev_err(priv->device,
3138                                                 "fail to alloc skb entry %d\n",
3139                                                 entry);
3140                                 break;
3141                         }
3142
3143                         rx_q->rx_skbuff[entry] = skb;
3144                         rx_q->rx_skbuff_dma[entry] =
3145                             dma_map_single(priv->device, skb->data, bfsize,
3146                                            DMA_FROM_DEVICE);
3147                         if (dma_mapping_error(priv->device,
3148                                               rx_q->rx_skbuff_dma[entry])) {
3149                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3150                                 dev_kfree_skb(skb);
3151                                 break;
3152                         }
3153
3154                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3155                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3156                                 p->des1 = 0;
3157                         } else {
3158                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3159                         }
3160                         if (priv->hw->mode->refill_desc3)
3161                                 priv->hw->mode->refill_desc3(rx_q, p);
3162
3163                         if (rx_q->rx_zeroc_thresh > 0)
3164                                 rx_q->rx_zeroc_thresh--;
3165
3166                         netif_dbg(priv, rx_status, priv->dev,
3167                                   "refill entry #%d\n", entry);
3168                 }
3169                 dma_wmb();
3170
3171                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3172                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3173                 else
3174                         priv->hw->desc->set_rx_owner(p);
3175
3176                 dma_wmb();
3177
3178                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3179         }
3180         rx_q->dirty_rx = entry;
3181 }
3182
3183 /**
3184  * stmmac_rx - manage the receive process
3185  * @priv: driver private structure
3186  * @limit: napi budget
3187  * @queue: RX queue index.
3188  * Description : this is the function called by the napi poll method.
3189  * It gets all the frames inside the ring.
3190  */
3191 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3192 {
3193         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3194         unsigned int entry = rx_q->cur_rx;
3195         int coe = priv->hw->rx_csum;
3196         unsigned int next_entry;
3197         unsigned int count = 0;
3198
3199         if (netif_msg_rx_status(priv)) {
3200                 void *rx_head;
3201
3202                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3203                 if (priv->extend_desc)
3204                         rx_head = (void *)rx_q->dma_erx;
3205                 else
3206                         rx_head = (void *)rx_q->dma_rx;
3207
3208                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3209         }
3210         while (count < limit) {
3211                 int status;
3212                 struct dma_desc *p;
3213                 struct dma_desc *np;
3214
3215                 if (priv->extend_desc)
3216                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3217                 else
3218                         p = rx_q->dma_rx + entry;
3219
3220                 /* read the status of the incoming frame */
3221                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3222                                                    &priv->xstats, p);
3223                 /* check if managed by the DMA otherwise go ahead */
3224                 if (unlikely(status & dma_own))
3225                         break;
3226
3227                 count++;
3228
3229                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3230                 next_entry = rx_q->cur_rx;
3231
3232                 if (priv->extend_desc)
3233                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3234                 else
3235                         np = rx_q->dma_rx + next_entry;
3236
3237                 prefetch(np);
3238
3239                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3240                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3241                                                            &priv->xstats,
3242                                                            rx_q->dma_erx +
3243                                                            entry);
3244                 if (unlikely(status == discard_frame)) {
3245                         priv->dev->stats.rx_errors++;
3246                         if (priv->hwts_rx_en && !priv->extend_desc) {
3247                                 /* DESC2 & DESC3 will be overwritten by the device
3248                                  * with the timestamp value, hence reinitialize
3249                                  * them in the stmmac_rx_refill() function so that
3250                                  * the device can reuse them.
3251                                  */
3252                                 rx_q->rx_skbuff[entry] = NULL;
3253                                 dma_unmap_single(priv->device,
3254                                                  rx_q->rx_skbuff_dma[entry],
3255                                                  priv->dma_buf_sz,
3256                                                  DMA_FROM_DEVICE);
3257                         }
3258                 } else {
3259                         struct sk_buff *skb;
3260                         int frame_len;
3261                         unsigned int des;
3262
3263                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3264                                 des = le32_to_cpu(p->des0);
3265                         else
3266                                 des = le32_to_cpu(p->des2);
3267
3268                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3269
3270                         /*  If frame length is greater than skb buffer size
3271                          *  (preallocated during init) then the packet is
3272                          *  ignored
3273                          */
3274                         if (frame_len > priv->dma_buf_sz) {
3275                                 netdev_err(priv->dev,
3276                                            "len %d larger than size (%d)\n",
3277                                            frame_len, priv->dma_buf_sz);
3278                                 priv->dev->stats.rx_length_errors++;
3279                                 break;
3280                         }
3281
3282                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3283                          * Type frames (LLC/LLC-SNAP)
3284                          */
3285                         if (unlikely(status != llc_snap))
3286                                 frame_len -= ETH_FCS_LEN;
3287
3288                         if (netif_msg_rx_status(priv)) {
3289                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3290                                            p, entry, des);
3291                                 if (frame_len > ETH_FRAME_LEN)
3292                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3293                                                    frame_len, status);
3294                         }
3295
3296                         /* Zero-copy is always used, for all frame sizes, in
3297                          * the case of GMAC4, because the used descriptors
3298                          * always need to be refilled.
3299                          */
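                             /* On older cores, frames shorter than rx_copybreak
                              * (or any frame once the zero-copy threshold has
                              * been hit) are copied into a small, freshly
                              * allocated skb so the original DMA buffer stays
                              * mapped and can be reused.
                              */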
3300                         if (unlikely(!priv->plat->has_gmac4 &&
3301                                      ((frame_len < priv->rx_copybreak) ||
3302                                      stmmac_rx_threshold_count(rx_q)))) {
3303                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3304                                                                 frame_len);
3305                                 if (unlikely(!skb)) {
3306                                         if (net_ratelimit())
3307                                                 dev_warn(priv->device,
3308                                                          "packet dropped\n");
3309                                         priv->dev->stats.rx_dropped++;
3310                                         break;
3311                                 }
3312
3313                                 dma_sync_single_for_cpu(priv->device,
3314                                                         rx_q->rx_skbuff_dma
3315                                                         [entry], frame_len,
3316                                                         DMA_FROM_DEVICE);
3317                                 skb_copy_to_linear_data(skb,
3318                                                         rx_q->
3319                                                         rx_skbuff[entry]->data,
3320                                                         frame_len);
3321
3322                                 skb_put(skb, frame_len);
3323                                 dma_sync_single_for_device(priv->device,
3324                                                            rx_q->rx_skbuff_dma
3325                                                            [entry], frame_len,
3326                                                            DMA_FROM_DEVICE);
3327                         } else {
3328                                 skb = rx_q->rx_skbuff[entry];
3329                                 if (unlikely(!skb)) {
3330                                         netdev_err(priv->dev,
3331                                                    "%s: Inconsistent Rx chain\n",
3332                                                    priv->dev->name);
3333                                         priv->dev->stats.rx_dropped++;
3334                                         break;
3335                                 }
3336                                 prefetch(skb->data - NET_IP_ALIGN);
3337                                 rx_q->rx_skbuff[entry] = NULL;
3338                                 rx_q->rx_zeroc_thresh++;
3339
3340                                 skb_put(skb, frame_len);
3341                                 dma_unmap_single(priv->device,
3342                                                  rx_q->rx_skbuff_dma[entry],
3343                                                  priv->dma_buf_sz,
3344                                                  DMA_FROM_DEVICE);
3345                         }
3346
3347                         if (netif_msg_pktdata(priv)) {
3348                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3349                                            frame_len);
3350                                 print_pkt(skb->data, frame_len);
3351                         }
3352
3353                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3354
3355                         stmmac_rx_vlan(priv->dev, skb);
3356
3357                         skb->protocol = eth_type_trans(skb, priv->dev);
3358
3359                         if (unlikely(!coe))
3360                                 skb_checksum_none_assert(skb);
3361                         else
3362                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3363
3364                         napi_gro_receive(&priv->napi, skb);
3365
3366                         priv->dev->stats.rx_packets++;
3367                         priv->dev->stats.rx_bytes += frame_len;
3368                 }
3369                 entry = next_entry;
3370         }
3371
3372         stmmac_rx_refill(priv, queue);
3373
3374         priv->xstats.rx_pkt_n += count;
3375
3376         return count;
3377 }
3378
3379 /**
3380  *  stmmac_poll - stmmac poll method (NAPI)
3381  *  @napi : pointer to the napi structure.
3382  *  @budget : maximum number of packets that the current CPU can receive from
3383  *            all interfaces.
3384  *  Description :
3385  *  To look at the incoming frames and clear the tx resources.
3386  */
3387 static int stmmac_poll(struct napi_struct *napi, int budget)
3388 {
3389         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
3390         u32 tx_count = priv->plat->tx_queues_to_use;
3391         u32 chan = STMMAC_CHAN0;
3392         int work_done = 0;
3393         u32 queue = chan;
3394
3395         priv->xstats.napi_poll++;
3396
3397         /* check all the queues */
3398         for (queue = 0; queue < tx_count; queue++)
3399                 stmmac_tx_clean(priv, queue);
3400
3401         queue = chan;
3402
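             /* Only RX queue 0 is serviced here: this version of the driver
              * uses a single NAPI instance, bound to channel 0.
              */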
3403         work_done = stmmac_rx(priv, budget, queue);
3404         if (work_done < budget) {
3405                 napi_complete_done(napi, work_done);
3406                 stmmac_enable_dma_irq(priv, chan);
3407         }
3408         return work_done;
3409 }
3410
3411 /**
3412  *  stmmac_tx_timeout
3413  *  @dev : Pointer to net device structure
3414  *  Description: this function is called when a packet transmission fails to
3415  *   complete within a reasonable time. The driver will mark the error in the
3416  *   netdev structure and arrange for the device to be reset to a sane state
3417  *   in order to transmit a new packet.
3418  */
3419 static void stmmac_tx_timeout(struct net_device *dev)
3420 {
3421         struct stmmac_priv *priv = netdev_priv(dev);
3422         u32 tx_count = priv->plat->tx_queues_to_use;
3423         u32 chan;
3424
3425         /* Clear Tx resources and restart transmitting again */
3426         for (chan = 0; chan < tx_count; chan++)
3427                 stmmac_tx_err(priv, chan);
3428 }
3429
3430 /**
3431  *  stmmac_set_rx_mode - entry point for multicast addressing
3432  *  @dev : pointer to the device structure
3433  *  Description:
3434  *  This function is a driver entry point which gets called by the kernel
3435  *  whenever multicast addresses must be enabled/disabled.
3436  *  Return value:
3437  *  void.
3438  */
3439 static void stmmac_set_rx_mode(struct net_device *dev)
3440 {
3441         struct stmmac_priv *priv = netdev_priv(dev);
3442
3443         priv->hw->mac->set_filter(priv->hw, dev);
3444 }
3445
3446 /**
3447  *  stmmac_change_mtu - entry point to change MTU size for the device.
3448  *  @dev : device pointer.
3449  *  @new_mtu : the new MTU size for the device.
3450  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3451  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3452  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3453  *  Return value:
3454  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3455  *  file on failure.
3456  */
3457 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3458 {
3459         struct stmmac_priv *priv = netdev_priv(dev);
3460
3461         if (netif_running(dev)) {
3462                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3463                 return -EBUSY;
3464         }
3465
3466         dev->mtu = new_mtu;
3467
3468         netdev_update_features(dev);
3469
3470         return 0;
3471 }
3472
3473 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3474                                              netdev_features_t features)
3475 {
3476         struct stmmac_priv *priv = netdev_priv(dev);
3477
3478         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3479                 features &= ~NETIF_F_RXCSUM;
3480
3481         if (!priv->plat->tx_coe)
3482                 features &= ~NETIF_F_CSUM_MASK;
3483
3484         /* Some GMAC devices have buggy Jumbo frame support that
3485          * requires the Tx COE to be disabled for oversized frames
3486          * (due to limited buffer sizes). In this case we disable
3487          * the TX csum insertion in the TDES and do not use SF.
3488          */
3489         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3490                 features &= ~NETIF_F_CSUM_MASK;
3491
3492         /* Disable tso if asked by ethtool */
3493         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3494                 if (features & NETIF_F_TSO)
3495                         priv->tso = true;
3496                 else
3497                         priv->tso = false;
3498         }
3499
3500         return features;
3501 }
3502
3503 static int stmmac_set_features(struct net_device *netdev,
3504                                netdev_features_t features)
3505 {
3506         struct stmmac_priv *priv = netdev_priv(netdev);
3507
3508         /* Keep the COE Type if checksum offload is supported */
3509         if (features & NETIF_F_RXCSUM)
3510                 priv->hw->rx_csum = priv->plat->rx_coe;
3511         else
3512                 priv->hw->rx_csum = 0;
3513         /* No check needed because rx_coe has been set before, and it will be
3514          * fixed if an issue arises.
3515          */
3516         priv->hw->mac->rx_ipc(priv->hw);
3517
3518         return 0;
3519 }
3520
3521 /**
3522  *  stmmac_interrupt - main ISR
3523  *  @irq: interrupt number.
3524  *  @dev_id: to pass the net device pointer.
3525  *  Description: this is the main driver interrupt service routine.
3526  *  It can call:
3527  *  o DMA service routine (to manage incoming frame reception and transmission
3528  *    status)
3529  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3530  *    interrupts.
3531  */
3532 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3533 {
3534         struct net_device *dev = (struct net_device *)dev_id;
3535         struct stmmac_priv *priv = netdev_priv(dev);
3536         u32 rx_cnt = priv->plat->rx_queues_to_use;
3537         u32 tx_cnt = priv->plat->tx_queues_to_use;
3538         u32 queues_count;
3539         u32 queue;
3540
3541         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3542
3543         if (priv->irq_wake)
3544                 pm_wakeup_event(priv->device, 0);
3545
3546         if (unlikely(!dev)) {
3547                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3548                 return IRQ_NONE;
3549         }
3550
3551         /* To handle GMAC own interrupts */
3552         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3553                 int status = priv->hw->mac->host_irq_status(priv->hw,
3554                                                             &priv->xstats);
3555
3556                 if (unlikely(status)) {
3557                         /* For LPI we need to save the tx status */
3558                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3559                                 priv->tx_path_in_lpi_mode = true;
3560                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3561                                 priv->tx_path_in_lpi_mode = false;
3562                 }
3563
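                     /* GMAC4 cores report per-queue MTL interrupts; on an RX
                      * FIFO overflow the RX tail pointer is rewritten so the
                      * DMA resumes fetching descriptors for that queue.
                      */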
3564                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3565                         for (queue = 0; queue < queues_count; queue++) {
3566                                 struct stmmac_rx_queue *rx_q =
3567                                 &priv->rx_queue[queue];
3568
3569                                 status |=
3570                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3571                                                                    queue);
3572
3573                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3574                                     priv->hw->dma->set_rx_tail_ptr)
3575                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3576                                                                 rx_q->rx_tail_addr,
3577                                                                 queue);
3578                         }
3579                 }
3580
3581                 /* PCS link status */
3582                 if (priv->hw->pcs) {
3583                         if (priv->xstats.pcs_link)
3584                                 netif_carrier_on(dev);
3585                         else
3586                                 netif_carrier_off(dev);
3587                 }
3588         }
3589
3590         /* To handle DMA interrupts */
3591         stmmac_dma_interrupt(priv);
3592
3593         return IRQ_HANDLED;
3594 }
3595
3596 #ifdef CONFIG_NET_POLL_CONTROLLER
3597 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3598  * to allow network I/O with interrupts disabled.
3599  */
3600 static void stmmac_poll_controller(struct net_device *dev)
3601 {
3602         disable_irq(dev->irq);
3603         stmmac_interrupt(dev->irq, dev);
3604         enable_irq(dev->irq);
3605 }
3606 #endif
3607
3608 /**
3609  *  stmmac_ioctl - Entry point for the Ioctl
3610  *  @dev: Device pointer.
3611  *  @rq: An IOCTL-specific structure that can contain a pointer to
3612  *  a proprietary structure used to pass information to the driver.
3613  *  @cmd: IOCTL command
3614  *  Description:
3615  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3616  */
3617 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3618 {
3619         int ret = -EOPNOTSUPP;
3620
3621         if (!netif_running(dev))
3622                 return -EINVAL;
3623
3624         switch (cmd) {
3625         case SIOCGMIIPHY:
3626         case SIOCGMIIREG:
3627         case SIOCSMIIREG:
3628                 if (!dev->phydev)
3629                         return -EINVAL;
3630                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3631                 break;
3632         case SIOCSHWTSTAMP:
3633                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3634                 break;
3635         default:
3636                 break;
3637         }
3638
3639         return ret;
3640 }
3641
3642 #ifdef CONFIG_DEBUG_FS
3643 static struct dentry *stmmac_fs_dir;
3644
3645 static void sysfs_display_ring(void *head, int size, int extend_desc,
3646                                struct seq_file *seq)
3647 {
3648         int i;
3649         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3650         struct dma_desc *p = (struct dma_desc *)head;
3651
3652         for (i = 0; i < size; i++) {
3653                 if (extend_desc) {
3654                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3655                                    i, (unsigned int)virt_to_phys(ep),
3656                                    le32_to_cpu(ep->basic.des0),
3657                                    le32_to_cpu(ep->basic.des1),
3658                                    le32_to_cpu(ep->basic.des2),
3659                                    le32_to_cpu(ep->basic.des3));
3660                         ep++;
3661                 } else {
3662                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3663                                    i, (unsigned int)virt_to_phys(p),
3664                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3665                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3666                         p++;
3667                 }
3668                 seq_printf(seq, "\n");
3669         }
3670 }
3671
3672 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3673 {
3674         struct net_device *dev = seq->private;
3675         struct stmmac_priv *priv = netdev_priv(dev);
3676         u32 rx_count = priv->plat->rx_queues_to_use;
3677         u32 tx_count = priv->plat->tx_queues_to_use;
3678         u32 queue;
3679
3680         for (queue = 0; queue < rx_count; queue++) {
3681                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3682
3683                 seq_printf(seq, "RX Queue %d:\n", queue);
3684
3685                 if (priv->extend_desc) {
3686                         seq_printf(seq, "Extended descriptor ring:\n");
3687                         sysfs_display_ring((void *)rx_q->dma_erx,
3688                                            DMA_RX_SIZE, 1, seq);
3689                 } else {
3690                         seq_printf(seq, "Descriptor ring:\n");
3691                         sysfs_display_ring((void *)rx_q->dma_rx,
3692                                            DMA_RX_SIZE, 0, seq);
3693                 }
3694         }
3695
3696         for (queue = 0; queue < tx_count; queue++) {
3697                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3698
3699                 seq_printf(seq, "TX Queue %d:\n", queue);
3700
3701                 if (priv->extend_desc) {
3702                         seq_printf(seq, "Extended descriptor ring:\n");
3703                         sysfs_display_ring((void *)tx_q->dma_etx,
3704                                            DMA_TX_SIZE, 1, seq);
3705                 } else {
3706                         seq_printf(seq, "Descriptor ring:\n");
3707                         sysfs_display_ring((void *)tx_q->dma_tx,
3708                                            DMA_TX_SIZE, 0, seq);
3709                 }
3710         }
3711
3712         return 0;
3713 }
3714
3715 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3716 {
3717         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3718 }
3719
3720 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3721
3722 static const struct file_operations stmmac_rings_status_fops = {
3723         .owner = THIS_MODULE,
3724         .open = stmmac_sysfs_ring_open,
3725         .read = seq_read,
3726         .llseek = seq_lseek,
3727         .release = single_release,
3728 };
3729
3730 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3731 {
3732         struct net_device *dev = seq->private;
3733         struct stmmac_priv *priv = netdev_priv(dev);
3734
3735         if (!priv->hw_cap_support) {
3736                 seq_printf(seq, "DMA HW features not supported\n");
3737                 return 0;
3738         }
3739
3740         seq_printf(seq, "==============================\n");
3741         seq_printf(seq, "\tDMA HW features\n");
3742         seq_printf(seq, "==============================\n");
3743
3744         seq_printf(seq, "\t10/100 Mbps: %s\n",
3745                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3746         seq_printf(seq, "\t1000 Mbps: %s\n",
3747                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3748         seq_printf(seq, "\tHalf duplex: %s\n",
3749                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3750         seq_printf(seq, "\tHash Filter: %s\n",
3751                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3752         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3753                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3754         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3755                    (priv->dma_cap.pcs) ? "Y" : "N");
3756         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3757                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3758         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3759                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3760         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3761                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3762         seq_printf(seq, "\tRMON module: %s\n",
3763                    (priv->dma_cap.rmon) ? "Y" : "N");
3764         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3765                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3766         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3767                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3768         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3769                    (priv->dma_cap.eee) ? "Y" : "N");
3770         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3771         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3772                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3773         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3774                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3775                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3776         } else {
3777                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3778                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3779                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3780                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3781         }
3782         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3783                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3784         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3785                    priv->dma_cap.number_rx_channel);
3786         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3787                    priv->dma_cap.number_tx_channel);
3788         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3789                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3790
3791         return 0;
3792 }
3793
3794 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3795 {
3796         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3797 }
3798
3799 static const struct file_operations stmmac_dma_cap_fops = {
3800         .owner = THIS_MODULE,
3801         .open = stmmac_sysfs_dma_cap_open,
3802         .read = seq_read,
3803         .llseek = seq_lseek,
3804         .release = single_release,
3805 };
3806
3807 static int stmmac_init_fs(struct net_device *dev)
3808 {
3809         struct stmmac_priv *priv = netdev_priv(dev);
3810
3811         /* Create per netdev entries */
3812         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3813
3814         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3815                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3816
3817                 return -ENOMEM;
3818         }
3819
3820         /* Entry to report DMA RX/TX rings */
3821         priv->dbgfs_rings_status =
3822                 debugfs_create_file("descriptors_status", S_IRUGO,
3823                                     priv->dbgfs_dir, dev,
3824                                     &stmmac_rings_status_fops);
3825
3826         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3827                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3828                 debugfs_remove_recursive(priv->dbgfs_dir);
3829
3830                 return -ENOMEM;
3831         }
3832
3833         /* Entry to report the DMA HW features */
3834         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3835                                             priv->dbgfs_dir,
3836                                             dev, &stmmac_dma_cap_fops);
3837
3838         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3839                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3840                 debugfs_remove_recursive(priv->dbgfs_dir);
3841
3842                 return -ENOMEM;
3843         }
3844
3845         return 0;
3846 }
3847
3848 static void stmmac_exit_fs(struct net_device *dev)
3849 {
3850         struct stmmac_priv *priv = netdev_priv(dev);
3851
3852         debugfs_remove_recursive(priv->dbgfs_dir);
3853 }
3854 #endif /* CONFIG_DEBUG_FS */
3855
3856 static const struct net_device_ops stmmac_netdev_ops = {
3857         .ndo_open = stmmac_open,
3858         .ndo_start_xmit = stmmac_xmit,
3859         .ndo_stop = stmmac_release,
3860         .ndo_change_mtu = stmmac_change_mtu,
3861         .ndo_fix_features = stmmac_fix_features,
3862         .ndo_set_features = stmmac_set_features,
3863         .ndo_set_rx_mode = stmmac_set_rx_mode,
3864         .ndo_tx_timeout = stmmac_tx_timeout,
3865         .ndo_do_ioctl = stmmac_ioctl,
3866 #ifdef CONFIG_NET_POLL_CONTROLLER
3867         .ndo_poll_controller = stmmac_poll_controller,
3868 #endif
3869         .ndo_set_mac_address = eth_mac_addr,
3870 };
3871
3872 /**
3873  *  stmmac_hw_init - Init the MAC device
3874  *  @priv: driver private structure
3875  *  Description: this function is to configure the MAC device according to
3876  *  some platform parameters or the HW capability register. It prepares the
3877  *  driver to use either ring or chain modes and to setup either enhanced or
3878  *  normal descriptors.
3879  */
3880 static int stmmac_hw_init(struct stmmac_priv *priv)
3881 {
3882         struct mac_device_info *mac;
3883
3884         /* Identify the MAC HW device */
3885         if (priv->plat->has_gmac) {
3886                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3887                 mac = dwmac1000_setup(priv->ioaddr,
3888                                       priv->plat->multicast_filter_bins,
3889                                       priv->plat->unicast_filter_entries,
3890                                       &priv->synopsys_id);
3891         } else if (priv->plat->has_gmac4) {
3892                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3893                 mac = dwmac4_setup(priv->ioaddr,
3894                                    priv->plat->multicast_filter_bins,
3895                                    priv->plat->unicast_filter_entries,
3896                                    &priv->synopsys_id);
3897         } else {
3898                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3899         }
3900         if (!mac)
3901                 return -ENOMEM;
3902
3903         priv->hw = mac;
3904
3905         /* To use the chained or ring mode */
3906         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3907                 priv->hw->mode = &dwmac4_ring_mode_ops;
3908         } else {
3909                 if (chain_mode) {
3910                         priv->hw->mode = &chain_mode_ops;
3911                         dev_info(priv->device, "Chain mode enabled\n");
3912                         priv->mode = STMMAC_CHAIN_MODE;
3913                 } else {
3914                         priv->hw->mode = &ring_mode_ops;
3915                         dev_info(priv->device, "Ring mode enabled\n");
3916                         priv->mode = STMMAC_RING_MODE;
3917                 }
3918         }
3919
3920         /* Get the HW capabilities (available on GMAC cores newer than 3.50a) */
3921         priv->hw_cap_support = stmmac_get_hw_features(priv);
3922         if (priv->hw_cap_support) {
3923                 dev_info(priv->device, "DMA HW capability register supported\n");
3924
3925                 /* We can override some gmac/dma configuration fields that
3926                  * are passed through the platform (e.g. enh_desc, tx_coe)
3927                  * with the values from the HW capability
3928                  * register (if supported).
3929                  */
3930                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3931                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3932                 priv->hw->pmt = priv->plat->pmt;
3933
3934                 /* TXCOE doesn't work in thresh DMA mode */
3935                 if (priv->plat->force_thresh_dma_mode)
3936                         priv->plat->tx_coe = 0;
3937                 else
3938                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3939
3940                 /* In case of GMAC4 rx_coe is from HW cap register. */
3941                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3942
3943                 if (priv->dma_cap.rx_coe_type2)
3944                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3945                 else if (priv->dma_cap.rx_coe_type1)
3946                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3947
3948         } else {
3949                 dev_info(priv->device, "No HW DMA feature register supported\n");
3950         }
3951
3952         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3953         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3954                 priv->hw->desc = &dwmac4_desc_ops;
3955         else
3956                 stmmac_selec_desc_mode(priv);
3957
3958         if (priv->plat->rx_coe) {
3959                 priv->hw->rx_csum = priv->plat->rx_coe;
3960                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3961                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3962                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3963         }
3964         if (priv->plat->tx_coe)
3965                 dev_info(priv->device, "TX Checksum insertion supported\n");
3966
3967         if (priv->plat->pmt) {
3968                 dev_info(priv->device, "Wake-Up On Lan supported\n");
3969                 device_set_wakeup_capable(priv->device, 1);
3970         }
3971
3972         if (priv->dma_cap.tsoen)
3973                 dev_info(priv->device, "TSO supported\n");
3974
3975         return 0;
3976 }
3977
3978 /**
3979  * stmmac_dvr_probe
3980  * @device: device pointer
3981  * @plat_dat: platform data pointer
3982  * @res: stmmac resource pointer
3983  * Description: this is the main probe function, used to
3984  * call alloc_etherdev and allocate the priv structure.
3985  * Return:
3986  * returns 0 on success, otherwise errno.
3987  */
3988 int stmmac_dvr_probe(struct device *device,
3989                      struct plat_stmmacenet_data *plat_dat,
3990                      struct stmmac_resources *res)
3991 {
3992         int ret = 0;
3993         struct net_device *ndev = NULL;
3994         struct stmmac_priv *priv;
3995
3996         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3997         if (!ndev)
3998                 return -ENOMEM;
3999
4000         SET_NETDEV_DEV(ndev, device);
4001
4002         priv = netdev_priv(ndev);
4003         priv->device = device;
4004         priv->dev = ndev;
4005
4006         stmmac_set_ethtool_ops(ndev);
4007         priv->pause = pause;
4008         priv->plat = plat_dat;
4009         priv->ioaddr = res->addr;
4010         priv->dev->base_addr = (unsigned long)res->addr;
4011
4012         priv->dev->irq = res->irq;
4013         priv->wol_irq = res->wol_irq;
4014         priv->lpi_irq = res->lpi_irq;
4015
4016         if (res->mac)
4017                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4018
4019         dev_set_drvdata(device, priv->dev);
4020
4021         /* Verify driver arguments */
4022         stmmac_verify_args();
4023
4024         /* Override with kernel parameters if supplied XXX CRS XXX
4025          * this needs to have multiple instances
4026          */
4027         if ((phyaddr >= 0) && (phyaddr <= 31))
4028                 priv->plat->phy_addr = phyaddr;
4029
4030         if (priv->plat->stmmac_rst)
4031                 reset_control_deassert(priv->plat->stmmac_rst);
4032
4033         /* Init MAC and get the capabilities */
4034         ret = stmmac_hw_init(priv);
4035         if (ret)
4036                 goto error_hw_init;
4037
4038         ndev->netdev_ops = &stmmac_netdev_ops;
4039
4040         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4041                             NETIF_F_RXCSUM;
4042
4043         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4044                 ndev->hw_features |= NETIF_F_TSO;
4045                 priv->tso = true;
4046                 dev_info(priv->device, "TSO feature enabled\n");
4047         }
4048         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4049         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4050 #ifdef STMMAC_VLAN_TAG_USED
4051         /* Both mac100 and gmac support receive VLAN tag detection */
4052         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4053 #endif
4054         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4055
4056         /* MTU range: 46 - hw-specific max */
4057         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4058         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4059                 ndev->max_mtu = JUMBO_LEN;
4060         else
4061                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4062         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4063          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4064          */
4065         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4066             (priv->plat->maxmtu >= ndev->min_mtu))
4067                 ndev->max_mtu = priv->plat->maxmtu;
4068         else if (priv->plat->maxmtu < ndev->min_mtu)
4069                 dev_warn(priv->device,
4070                          "%s: warning: maxmtu having invalid value (%d)\n",
4071                          __func__, priv->plat->maxmtu);
4072
4073         if (flow_ctrl)
4074                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4075
4076         /* Rx Watchdog is available in cores newer than 3.40.
4077          * In some cases, for example on buggy HW, this feature
4078          * has to be disabled; this can be done by passing the
4079          * riwt_off field from the platform.
4080          */
4081         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4082                 priv->use_riwt = 1;
4083                 dev_info(priv->device,
4084                          "Enable RX Mitigation via HW Watchdog Timer\n");
4085         }
4086
4087         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
4088
4089         spin_lock_init(&priv->lock);
4090
4091         /* If a specific clk_csr value is passed from the platform,
4092          * this means that the CSR Clock Range selection is fixed and
4093          * cannot be changed at run-time. Otherwise, the driver will
4094          * try to set the MDC clock dynamically according to the
4095          * actual csr clock input.
4096          */
4097         if (!priv->plat->clk_csr)
4098                 stmmac_clk_csr_set(priv);
4099         else
4100                 priv->clk_csr = priv->plat->clk_csr;
4101
4102         stmmac_check_pcs_mode(priv);
4103
4104         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4105             priv->hw->pcs != STMMAC_PCS_TBI &&
4106             priv->hw->pcs != STMMAC_PCS_RTBI) {
4107                 /* MDIO bus Registration */
4108                 ret = stmmac_mdio_register(ndev);
4109                 if (ret < 0) {
4110                         dev_err(priv->device,
4111                                 "%s: MDIO bus (id: %d) registration failed\n",
4112                                 __func__, priv->plat->bus_id);
4113                         goto error_mdio_register;
4114                 }
4115         }
4116
4117         ret = register_netdev(ndev);
4118         if (ret) {
4119                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4120                         __func__, ret);
4121                 goto error_netdev_register;
4122         }
4123
4124         return ret;
4125
4126 error_netdev_register:
4127         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4128             priv->hw->pcs != STMMAC_PCS_TBI &&
4129             priv->hw->pcs != STMMAC_PCS_RTBI)
4130                 stmmac_mdio_unregister(ndev);
4131 error_mdio_register:
4132         netif_napi_del(&priv->napi);
4133 error_hw_init:
4134         free_netdev(ndev);
4135
4136         return ret;
4137 }
4138 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
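
/* Minimal sketch (not part of this file) of how a bus glue driver is
 * expected to call stmmac_dvr_probe(): it fills a struct stmmac_resources
 * with the register base and interrupt line and passes it together with
 * its plat_stmmacenet_data. The function and variable names below are
 * purely illustrative; real glue code lives in the dwmac-* drivers.
 *
 *	static int dwmac_example_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat = dev_get_platdata(&pdev->dev);
 *		struct stmmac_resources res = { };
 *		struct resource *iores;
 *
 *		iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		res.addr = devm_ioremap_resource(&pdev->dev, iores);
 *		if (IS_ERR(res.addr))
 *			return PTR_ERR(res.addr);
 *
 *		res.irq = platform_get_irq(pdev, 0);
 *		if (res.irq < 0)
 *			return res.irq;
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	}
 *
 * res.mac (the MAC address), res.wol_irq and res.lpi_irq can additionally
 * be filled in when the platform provides them.
 */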
4139
4140 /**
4141  * stmmac_dvr_remove
4142  * @dev: device pointer
4143  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4144  * changes the link status and releases the DMA descriptor rings.
4145  */
4146 int stmmac_dvr_remove(struct device *dev)
4147 {
4148         struct net_device *ndev = dev_get_drvdata(dev);
4149         struct stmmac_priv *priv = netdev_priv(ndev);
4150
4151         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4152
4153         stmmac_stop_all_dma(priv);
4154
4155         priv->hw->mac->set_mac(priv->ioaddr, false);
4156         netif_carrier_off(ndev);
4157         unregister_netdev(ndev);
4158         if (priv->plat->stmmac_rst)
4159                 reset_control_assert(priv->plat->stmmac_rst);
4160         clk_disable_unprepare(priv->plat->pclk);
4161         clk_disable_unprepare(priv->plat->stmmac_clk);
4162         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4163             priv->hw->pcs != STMMAC_PCS_TBI &&
4164             priv->hw->pcs != STMMAC_PCS_RTBI)
4165                 stmmac_mdio_unregister(ndev);
4166         free_netdev(ndev);
4167
4168         return 0;
4169 }
4170 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
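
/* The matching teardown in such a glue driver's .remove callback is, in
 * sketch form (the name is again illustrative):
 *
 *	static int dwmac_example_remove(struct platform_device *pdev)
 *	{
 *		return stmmac_dvr_remove(&pdev->dev);
 *	}
 *
 * dev_get_drvdata() works here because stmmac_dvr_probe() stored the
 * net_device with dev_set_drvdata().
 */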
4171
4172 /**
4173  * stmmac_suspend - suspend callback
4174  * @dev: device pointer
4175  * Description: this function suspends the device and is called by the
4176  * platform driver to stop the network queue, program the PMT register
4177  * (for WoL) and clean up and release the driver resources.
4178  */
4179 int stmmac_suspend(struct device *dev)
4180 {
4181         struct net_device *ndev = dev_get_drvdata(dev);
4182         struct stmmac_priv *priv = netdev_priv(ndev);
4183         unsigned long flags;
4184
4185         if (!ndev || !netif_running(ndev))
4186                 return 0;
4187
4188         if (ndev->phydev)
4189                 phy_stop(ndev->phydev);
4190
4191         spin_lock_irqsave(&priv->lock, flags);
4192
4193         netif_device_detach(ndev);
4194         netif_stop_queue(ndev);
4195
4196         napi_disable(&priv->napi);
4197
4198         /* Stop TX/RX DMA */
4199         stmmac_stop_all_dma(priv);
4200
4201         /* Enable Power down mode by programming the PMT regs */
4202         if (device_may_wakeup(priv->device)) {
4203                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4204                 priv->irq_wake = 1;
4205         } else {
4206                 priv->hw->mac->set_mac(priv->ioaddr, false);
4207                 pinctrl_pm_select_sleep_state(priv->device);
4208                 /* Disable clocks since PMT wake-up is not used */
4209                 clk_disable(priv->plat->pclk);
4210                 clk_disable(priv->plat->stmmac_clk);
4211         }
4212         spin_unlock_irqrestore(&priv->lock, flags);
4213
4214         priv->oldlink = 0;
4215         priv->speed = SPEED_UNKNOWN;
4216         priv->oldduplex = DUPLEX_UNKNOWN;
4217         return 0;
4218 }
4219 EXPORT_SYMBOL_GPL(stmmac_suspend);
4220
4221 /**
4222  * stmmac_reset_queues_param - reset queue parameters
4223  * @priv: driver private structure
4224  */
4225 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4226 {
4227         u32 rx_cnt = priv->plat->rx_queues_to_use;
4228         u32 tx_cnt = priv->plat->tx_queues_to_use;
4229         u32 queue;
4230
4231         for (queue = 0; queue < rx_cnt; queue++) {
4232                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4233
4234                 rx_q->cur_rx = 0;
4235                 rx_q->dirty_rx = 0;
4236         }
4237
4238         for (queue = 0; queue < tx_cnt; queue++) {
4239                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4240
4241                 tx_q->cur_tx = 0;
4242                 tx_q->dirty_tx = 0;
4243         }
4244 }
4245
4246 /**
4247  * stmmac_resume - resume callback
4248  * @dev: device pointer
4249  * Description: when resuming, this function is invoked to set up the DMA
4250  * and CORE in a usable state.
4251  */
4252 int stmmac_resume(struct device *dev)
4253 {
4254         struct net_device *ndev = dev_get_drvdata(dev);
4255         struct stmmac_priv *priv = netdev_priv(ndev);
4256         unsigned long flags;
4257
4258         if (!netif_running(ndev))
4259                 return 0;
4260
4261         /* The Power Down bit, in the PMT register, is cleared
4262          * automatically as soon as a magic packet or a Wake-up frame
4263          * is received. Anyway, it's better to clear this bit manually
4264          * because it can cause problems while resuming from another
4265          * device (e.g. serial console).
4266          */
4267         if (device_may_wakeup(priv->device)) {
4268                 spin_lock_irqsave(&priv->lock, flags);
4269                 priv->hw->mac->pmt(priv->hw, 0);
4270                 spin_unlock_irqrestore(&priv->lock, flags);
4271                 priv->irq_wake = 0;
4272         } else {
4273                 pinctrl_pm_select_default_state(priv->device);
4274                 /* enable the clocks previously disabled */
4275                 clk_enable(priv->plat->stmmac_clk);
4276                 clk_enable(priv->plat->pclk);
4277                 /* reset the phy so that it's ready */
4278                 if (priv->mii)
4279                         stmmac_mdio_reset(priv->mii);
4280         }
4281
4282         netif_device_attach(ndev);
4283
4284         spin_lock_irqsave(&priv->lock, flags);
4285
4286         stmmac_reset_queues_param(priv);
4287
4288         /* reset private mss value to force mss context settings at
4289          * next tso xmit (only used for gmac4).
4290          */
4291         priv->mss = 0;
4292
4293         stmmac_clear_descriptors(priv);
4294
4295         stmmac_hw_setup(ndev, false);
4296         stmmac_init_tx_coalesce(priv);
4297         stmmac_set_rx_mode(ndev);
4298
4299         napi_enable(&priv->napi);
4300
4301         netif_start_queue(ndev);
4302
4303         spin_unlock_irqrestore(&priv->lock, flags);
4304
4305         if (ndev->phydev)
4306                 phy_start(ndev->phydev);
4307
4308         return 0;
4309 }
4310 EXPORT_SYMBOL_GPL(stmmac_resume);
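
/* Sketch of how a glue driver can wire the suspend/resume callbacks above
 * into its struct dev_pm_ops; the ops name is illustrative only:
 *
 *	static SIMPLE_DEV_PM_OPS(dwmac_example_pm_ops,
 *				 stmmac_suspend, stmmac_resume);
 *
 * The resulting &dwmac_example_pm_ops is then assigned to the .driver.pm
 * field of the platform (or PCI) driver.
 */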
4311
4312 #ifndef MODULE
4313 static int __init stmmac_cmdline_opt(char *str)
4314 {
4315         char *opt;
4316
4317         if (!str || !*str)
4318                 return -EINVAL;
4319         while ((opt = strsep(&str, ",")) != NULL) {
4320                 if (!strncmp(opt, "debug:", 6)) {
4321                         if (kstrtoint(opt + 6, 0, &debug))
4322                                 goto err;
4323                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4324                         if (kstrtoint(opt + 8, 0, &phyaddr))
4325                                 goto err;
4326                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4327                         if (kstrtoint(opt + 7, 0, &buf_sz))
4328                                 goto err;
4329                 } else if (!strncmp(opt, "tc:", 3)) {
4330                         if (kstrtoint(opt + 3, 0, &tc))
4331                                 goto err;
4332                 } else if (!strncmp(opt, "watchdog:", 9)) {
4333                         if (kstrtoint(opt + 9, 0, &watchdog))
4334                                 goto err;
4335                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4336                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4337                                 goto err;
4338                 } else if (!strncmp(opt, "pause:", 6)) {
4339                         if (kstrtoint(opt + 6, 0, &pause))
4340                                 goto err;
4341                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4342                         if (kstrtoint(opt + 10, 0, &eee_timer))
4343                                 goto err;
4344                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4345                         if (kstrtoint(opt + 11, 0, &chain_mode))
4346                                 goto err;
4347                 }
4348         }
4349         return 0;
4350
4351 err:
4352         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4353         return -EINVAL;
4354 }
4355
4356 __setup("stmmaceth=", stmmac_cmdline_opt);
4357 #endif /* MODULE */
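
/* When the driver is built in, the __setup() hook above parses a comma
 * separated "key:value" list from the kernel command line, e.g.
 * (values purely illustrative):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:5000,buf_sz:4096,flow_ctrl:1
 */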
4358
4359 static int __init stmmac_init(void)
4360 {
4361 #ifdef CONFIG_DEBUG_FS
4362         /* Create debugfs main directory if it doesn't exist yet */
4363         if (!stmmac_fs_dir) {
4364                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4365
4366                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4367                         pr_err("ERROR %s, debugfs create directory failed\n",
4368                                STMMAC_RESOURCE_NAME);
4369
4370                         return -ENOMEM;
4371                 }
4372         }
4373 #endif
4374
4375         return 0;
4376 }
4377
4378 static void __exit stmmac_exit(void)
4379 {
4380 #ifdef CONFIG_DEBUG_FS
4381         debugfs_remove_recursive(stmmac_fs_dir);
4382 #endif
4383 }
4384
4385 module_init(stmmac_init)
4386 module_exit(stmmac_exit)
4387
4388 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4389 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4390 MODULE_LICENSE("GPL");