drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but the user can force the use of chain mode instead of the ring.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
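/* Usage note (illustrative, not from the original source): when the driver is
 * built as a module these parameters can be set at load time, e.g.
 * "modprobe stmmac chain_mode=1"; when built in, the same setting can be
 * passed on the kernel command line as "stmmac.chain_mode=1".
 */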
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in
123  * case of errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_clk_csr_set - dynamically set the MDC clock
143  * @priv: driver private structure
144  * Description: this is to dynamically set the MDC clock according to the csr
145  * clock input.
146  * Note:
147  *      If a specific clk_csr value is passed from the platform,
148  *      the CSR Clock Range selection cannot be changed at run-time
149  *      and is fixed (as reported in the driver documentation).
150  *      Otherwise the driver will try to set the MDC clock dynamically
151  *      according to the actual clock input.
152  */
153 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
154 {
155         u32 clk_rate;
156
157         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
158
159         /* The platform-provided default clk_csr is assumed valid in
160          * all cases except the ones handled below.
161          * For clock rates higher than the IEEE 802.3 specified frequency
162          * we cannot estimate the proper divider, because the frequency
163          * of clk_csr_i is not known. In that case we do not change the
164          * default divider.
165          */
166         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
167                 if (clk_rate < CSR_F_35M)
168                         priv->clk_csr = STMMAC_CSR_20_35M;
169                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
170                         priv->clk_csr = STMMAC_CSR_35_60M;
171                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
172                         priv->clk_csr = STMMAC_CSR_60_100M;
173                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
174                         priv->clk_csr = STMMAC_CSR_100_150M;
175                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
176                         priv->clk_csr = STMMAC_CSR_150_250M;
177                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
178                         priv->clk_csr = STMMAC_CSR_250_300M;
179         }
180 }
181
182 static void print_pkt(unsigned char *buf, int len)
183 {
184         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
185         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
186 }
187
188 /**
189  * stmmac_tx_avail - Get tx queue availability
190  * @priv: driver private structure
191  * @queue: TX queue index
192  */
193 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
194 {
195         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
196         u32 avail;
197
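            /* cur_tx and dirty_tx index a ring of DMA_TX_SIZE descriptors;
             * the free space is the gap between them minus one slot, so a
             * completely full ring stays distinguishable from an empty one.
             */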
198         if (tx_q->dirty_tx > tx_q->cur_tx)
199                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
200         else
201                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
202
203         return avail;
204 }
205
206 /**
207  * stmmac_rx_dirty - Get RX queue dirty
208  * @priv: driver private structure
209  * @queue: RX queue index
210  */
211 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
212 {
213         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
214         u32 dirty;
215
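            /* Number of RX descriptors already processed by the driver (up
             * to cur_rx) that still have to be re-armed with fresh buffers
             * (starting from dirty_rx).
             */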
216         if (rx_q->dirty_rx <= rx_q->cur_rx)
217                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
218         else
219                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
220
221         return dirty;
222 }
223
224 /**
225  * stmmac_hw_fix_mac_speed - callback for speed selection
226  * @priv: driver private structure
227  * Description: on some platforms (e.g. ST), some HW system configuration
228  * registers have to be set according to the link speed negotiated.
229  */
230 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
231 {
232         struct net_device *ndev = priv->dev;
233         struct phy_device *phydev = ndev->phydev;
234
235         if (likely(priv->plat->fix_mac_speed))
236                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
237 }
238
239 /**
240  * stmmac_enable_eee_mode - check and enter LPI mode
241  * @priv: driver private structure
242  * Description: this function checks whether all TX queues have finished
243  * their work and, if so, enters LPI mode when EEE is enabled.
244  */
245 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
246 {
247         u32 tx_cnt = priv->plat->tx_queues_to_use;
248         u32 queue;
249
250         /* check if all TX queues have the work finished */
251         for (queue = 0; queue < tx_cnt; queue++) {
252                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
253
254                 if (tx_q->dirty_tx != tx_q->cur_tx)
255                         return; /* still unfinished work */
256         }
257
258         /* Check and enter in LPI mode */
259         if (!priv->tx_path_in_lpi_mode)
260                 priv->hw->mac->set_eee_mode(priv->hw,
261                                             priv->plat->en_tx_lpi_clockgating);
262 }
263
264 /**
265  * stmmac_disable_eee_mode - disable and exit from LPI mode
266  * @priv: driver private structure
267  * Description: this function exits LPI mode and disables EEE. It is
268  * called from the xmit path.
269  */
270 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
271 {
272         priv->hw->mac->reset_eee_mode(priv->hw);
273         del_timer_sync(&priv->eee_ctrl_timer);
274         priv->tx_path_in_lpi_mode = false;
275 }
276
277 /**
278  * stmmac_eee_ctrl_timer - EEE TX SW timer.
279  * @arg : data hook
280  * Description:
281  *  if there is no data transfer and if we are not in LPI state,
282  *  then MAC Transmitter can be moved to LPI state.
283  */
284 static void stmmac_eee_ctrl_timer(unsigned long arg)
285 {
286         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
287
288         stmmac_enable_eee_mode(priv);
289         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
290 }
291
292 /**
293  * stmmac_eee_init - init EEE
294  * @priv: driver private structure
295  * Description:
296  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
297  *  can also manage EEE, this function enables the LPI state and starts
298  *  the related timer.
299  */
300 bool stmmac_eee_init(struct stmmac_priv *priv)
301 {
302         struct net_device *ndev = priv->dev;
303         unsigned long flags;
304         bool ret = false;
305
306         /* When using the PCS we cannot access the phy registers at this
307          * stage, so extra features like EEE are not supported.
308          */
309         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
310             (priv->hw->pcs == STMMAC_PCS_TBI) ||
311             (priv->hw->pcs == STMMAC_PCS_RTBI))
312                 goto out;
313
314         /* MAC core supports the EEE feature. */
315         if (priv->dma_cap.eee) {
316                 int tx_lpi_timer = priv->tx_lpi_timer;
317
318                 /* Check if the PHY supports EEE */
319                 if (phy_init_eee(ndev->phydev, 1)) {
320                         /* EEE may stop being supported at run-time (for
321                          * example because the link partner capabilities
322                          * have changed).
323                          * In that case the driver disables its own timers.
324                          */
325                         spin_lock_irqsave(&priv->lock, flags);
326                         if (priv->eee_active) {
327                                 netdev_dbg(priv->dev, "disable EEE\n");
328                                 del_timer_sync(&priv->eee_ctrl_timer);
329                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
330                                                              tx_lpi_timer);
331                         }
332                         priv->eee_active = 0;
333                         spin_unlock_irqrestore(&priv->lock, flags);
334                         goto out;
335                 }
336                 /* Activate the EEE and start timers */
337                 spin_lock_irqsave(&priv->lock, flags);
338                 if (!priv->eee_active) {
339                         priv->eee_active = 1;
340                         setup_timer(&priv->eee_ctrl_timer,
341                                     stmmac_eee_ctrl_timer,
342                                     (unsigned long)priv);
343                         mod_timer(&priv->eee_ctrl_timer,
344                                   STMMAC_LPI_T(eee_timer));
345
346                         priv->hw->mac->set_eee_timer(priv->hw,
347                                                      STMMAC_DEFAULT_LIT_LS,
348                                                      tx_lpi_timer);
349                 }
350                 /* Set HW EEE according to the speed */
351                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
352
353                 ret = true;
354                 spin_unlock_irqrestore(&priv->lock, flags);
355
356                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
357         }
358 out:
359         return ret;
360 }
361
362 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
363  * @priv: driver private structure
364  * @p : descriptor pointer
365  * @skb : the socket buffer
366  * Description :
367  * This function reads the timestamp from the descriptor, performs some
368  * sanity checks and passes it to the stack.
369  */
370 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
371                                    struct dma_desc *p, struct sk_buff *skb)
372 {
373         struct skb_shared_hwtstamps shhwtstamp;
374         u64 ns;
375
376         if (!priv->hwts_tx_en)
377                 return;
378
379         /* exit if skb doesn't support hw tstamp */
380         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
381                 return;
382
383         /* check tx tstamp status */
384         if (!priv->hw->desc->get_tx_timestamp_status(p)) {
385                 /* get the valid tstamp */
386                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
387
388                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
389                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
390
391                 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
392                 /* pass tstamp to stack */
393                 skb_tstamp_tx(skb, &shhwtstamp);
394         }
395
396         return;
397 }
398
399 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
400  * @priv: driver private structure
401  * @p : descriptor pointer
402  * @np : next descriptor pointer
403  * @skb : the socket buffer
404  * Description :
406  * This function reads the received packet's timestamp from the descriptor
407  * and passes it to the stack. It also performs some sanity checks.
407  */
408 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
409                                    struct dma_desc *np, struct sk_buff *skb)
410 {
411         struct skb_shared_hwtstamps *shhwtstamp = NULL;
412         u64 ns;
413
414         if (!priv->hwts_rx_en)
415                 return;
416
417         /* Check if timestamp is available */
418         if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
419                 /* For GMAC4, the valid timestamp is from CTX next desc. */
420                 if (priv->plat->has_gmac4)
421                         ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
422                 else
423                         ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
424
425                 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
426                 shhwtstamp = skb_hwtstamps(skb);
427                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
428                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
429         } else  {
430                 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
431         }
432 }
433
434 /**
435  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
436  *  @dev: device pointer.
437  *  @ifr: An IOCTL specific structure, that can contain a pointer to
438  *  a proprietary structure used to pass information to the driver.
439  *  Description:
440  *  This function configures the MAC to enable/disable both outgoing (TX)
441  *  and incoming (RX) packet time stamping based on user input.
442  *  Return Value:
443  *  0 on success and an appropriate -ve integer on failure.
444  */
445 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
446 {
447         struct stmmac_priv *priv = netdev_priv(dev);
448         struct hwtstamp_config config;
449         struct timespec64 now;
450         u64 temp = 0;
451         u32 ptp_v2 = 0;
452         u32 tstamp_all = 0;
453         u32 ptp_over_ipv4_udp = 0;
454         u32 ptp_over_ipv6_udp = 0;
455         u32 ptp_over_ethernet = 0;
456         u32 snap_type_sel = 0;
457         u32 ts_master_en = 0;
458         u32 ts_event_en = 0;
459         u32 value = 0;
460         u32 sec_inc;
461
462         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
463                 netdev_alert(priv->dev, "No support for HW time stamping\n");
464                 priv->hwts_tx_en = 0;
465                 priv->hwts_rx_en = 0;
466
467                 return -EOPNOTSUPP;
468         }
469
470         if (copy_from_user(&config, ifr->ifr_data,
471                            sizeof(struct hwtstamp_config)))
472                 return -EFAULT;
473
474         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
475                    __func__, config.flags, config.tx_type, config.rx_filter);
476
477         /* reserved for future extensions */
478         if (config.flags)
479                 return -EINVAL;
480
481         if (config.tx_type != HWTSTAMP_TX_OFF &&
482             config.tx_type != HWTSTAMP_TX_ON)
483                 return -ERANGE;
484
485         if (priv->adv_ts) {
486                 switch (config.rx_filter) {
487                 case HWTSTAMP_FILTER_NONE:
488                         /* time stamp no incoming packet at all */
489                         config.rx_filter = HWTSTAMP_FILTER_NONE;
490                         break;
491
492                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
493                         /* PTP v1, UDP, any kind of event packet */
494                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
495                         /* take time stamp for all event messages */
496                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
497
498                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
499                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
500                         break;
501
502                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
503                         /* PTP v1, UDP, Sync packet */
504                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
505                         /* take time stamp for SYNC messages only */
506                         ts_event_en = PTP_TCR_TSEVNTENA;
507
508                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
509                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
510                         break;
511
512                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
513                         /* PTP v1, UDP, Delay_req packet */
514                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
515                         /* take time stamp for Delay_Req messages only */
516                         ts_master_en = PTP_TCR_TSMSTRENA;
517                         ts_event_en = PTP_TCR_TSEVNTENA;
518
519                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
520                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
521                         break;
522
523                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
524                         /* PTP v2, UDP, any kind of event packet */
525                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
526                         ptp_v2 = PTP_TCR_TSVER2ENA;
527                         /* take time stamp for all event messages */
528                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
529
530                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
531                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
532                         break;
533
534                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
535                         /* PTP v2, UDP, Sync packet */
536                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
537                         ptp_v2 = PTP_TCR_TSVER2ENA;
538                         /* take time stamp for SYNC messages only */
539                         ts_event_en = PTP_TCR_TSEVNTENA;
540
541                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
542                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
543                         break;
544
545                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
546                         /* PTP v2, UDP, Delay_req packet */
547                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
548                         ptp_v2 = PTP_TCR_TSVER2ENA;
549                         /* take time stamp for Delay_Req messages only */
550                         ts_master_en = PTP_TCR_TSMSTRENA;
551                         ts_event_en = PTP_TCR_TSEVNTENA;
552
553                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
554                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
555                         break;
556
557                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
558                         /* PTP v2/802.1AS, any layer, any kind of event packet */
559                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
560                         ptp_v2 = PTP_TCR_TSVER2ENA;
561                         /* take time stamp for all event messages */
562                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
563
564                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
565                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
566                         ptp_over_ethernet = PTP_TCR_TSIPENA;
567                         break;
568
569                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
570                         /* PTP v2/802.1AS, any layer, Sync packet */
571                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
572                         ptp_v2 = PTP_TCR_TSVER2ENA;
573                         /* take time stamp for SYNC messages only */
574                         ts_event_en = PTP_TCR_TSEVNTENA;
575
576                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
577                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
578                         ptp_over_ethernet = PTP_TCR_TSIPENA;
579                         break;
580
581                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
582                         /* PTP v2/802.1AS, any layer, Delay_req packet */
583                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
584                         ptp_v2 = PTP_TCR_TSVER2ENA;
585                         /* take time stamp for Delay_Req messages only */
586                         ts_master_en = PTP_TCR_TSMSTRENA;
587                         ts_event_en = PTP_TCR_TSEVNTENA;
588
589                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
590                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
591                         ptp_over_ethernet = PTP_TCR_TSIPENA;
592                         break;
593
594                 case HWTSTAMP_FILTER_ALL:
595                         /* time stamp any incoming packet */
596                         config.rx_filter = HWTSTAMP_FILTER_ALL;
597                         tstamp_all = PTP_TCR_TSENALL;
598                         break;
599
600                 default:
601                         return -ERANGE;
602                 }
603         } else {
604                 switch (config.rx_filter) {
605                 case HWTSTAMP_FILTER_NONE:
606                         config.rx_filter = HWTSTAMP_FILTER_NONE;
607                         break;
608                 default:
609                         /* PTP v1, UDP, any kind of event packet */
610                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
611                         break;
612                 }
613         }
614         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
615         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
616
617         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
618                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
619         else {
620                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
621                          tstamp_all | ptp_v2 | ptp_over_ethernet |
622                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
623                          ts_master_en | snap_type_sel);
624                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
625
626                 /* program Sub Second Increment reg */
627                 sec_inc = priv->hw->ptp->config_sub_second_increment(
628                         priv->ptpaddr, priv->plat->clk_ptp_rate,
629                         priv->plat->has_gmac4);
630                 temp = div_u64(1000000000ULL, sec_inc);
631
632                 /* calculate the default addend value:
633                  * formula is:
634                  * addend = (2^32)/freq_div_ratio;
635                  * where freq_div_ratio = clk_ptp_rate/(1e9/sec_inc)
636                  */
637                 temp = (u64)(temp << 32);
638                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
639                 priv->hw->ptp->config_addend(priv->ptpaddr,
640                                              priv->default_addend);
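                /* Illustrative numbers (assumed, not from this source): with
                 * sec_inc = 20 ns the target update rate is 1e9 / 20 = 50 MHz;
                 * for a 62.5 MHz clk_ptp_rate the addend is
                 * 2^32 * 50e6 / 62.5e6, i.e. roughly 0.8 * 2^32.
                 */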
641
642                 /* initialize system time */
643                 ktime_get_real_ts64(&now);
644
645                 /* lower 32 bits of tv_sec are safe until y2106 */
646                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
647                                             now.tv_nsec);
648         }
649
650         return copy_to_user(ifr->ifr_data, &config,
651                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
652 }
653
654 /**
655  * stmmac_init_ptp - init PTP
656  * @priv: driver private structure
657  * Description: this verifies whether the HW supports PTPv1 or PTPv2 by
658  * looking at the HW capability register.
659  * This function also registers the ptp driver.
660  */
661 static int stmmac_init_ptp(struct stmmac_priv *priv)
662 {
663         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
664                 return -EOPNOTSUPP;
665
666         priv->adv_ts = 0;
667         /* Check if adv_ts can be enabled for dwmac 4.x core */
668         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
669                 priv->adv_ts = 1;
670         /* Dwmac 3.x core with extend_desc can support adv_ts */
671         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
672                 priv->adv_ts = 1;
673
674         if (priv->dma_cap.time_stamp)
675                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
676
677         if (priv->adv_ts)
678                 netdev_info(priv->dev,
679                             "IEEE 1588-2008 Advanced Timestamp supported\n");
680
681         priv->hw->ptp = &stmmac_ptp;
682         priv->hwts_tx_en = 0;
683         priv->hwts_rx_en = 0;
684
685         stmmac_ptp_register(priv);
686
687         return 0;
688 }
689
690 static void stmmac_release_ptp(struct stmmac_priv *priv)
691 {
692         if (priv->plat->clk_ptp_ref)
693                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
694         stmmac_ptp_unregister(priv);
695 }
696
697 /**
698  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
699  *  @priv: driver private structure
700  *  Description: It is used for configuring the flow control in all queues
701  */
702 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
703 {
704         u32 tx_cnt = priv->plat->tx_queues_to_use;
705
706         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
707                                  priv->pause, tx_cnt);
708 }
709
710 /**
711  * stmmac_adjust_link - adjusts the link parameters
712  * @dev: net device structure
713  * Description: this is the helper called by the physical abstraction layer
714  * drivers to communicate the phy link status. According to the speed and
715  * duplex, this driver can invoke registered glue-logic as well.
716  * It also invokes the eee initialization because the link may come up on
717  * different networks (that are eee capable).
718  */
719 static void stmmac_adjust_link(struct net_device *dev)
720 {
721         struct stmmac_priv *priv = netdev_priv(dev);
722         struct phy_device *phydev = dev->phydev;
723         unsigned long flags;
724         int new_state = 0;
725
726         if (!phydev)
727                 return;
728
729         spin_lock_irqsave(&priv->lock, flags);
730
731         if (phydev->link) {
732                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
733
734                 /* Now we make sure that we can be in full duplex mode.
735                  * If not, we operate in half-duplex mode. */
736                 if (phydev->duplex != priv->oldduplex) {
737                         new_state = 1;
738                         if (!(phydev->duplex))
739                                 ctrl &= ~priv->hw->link.duplex;
740                         else
741                                 ctrl |= priv->hw->link.duplex;
742                         priv->oldduplex = phydev->duplex;
743                 }
744                 /* Flow Control operation */
745                 if (phydev->pause)
746                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
747
748                 if (phydev->speed != priv->speed) {
749                         new_state = 1;
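                            /* Assumption: hw->link.port and hw->link.speed map
                             * to the core-specific port-select (MII vs GMII)
                             * and 10/100 speed bits of MAC_CTRL_REG.
                             */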
750                         switch (phydev->speed) {
751                         case 1000:
752                                 if (priv->plat->has_gmac ||
753                                     priv->plat->has_gmac4)
754                                         ctrl &= ~priv->hw->link.port;
755                                 break;
756                         case 100:
757                                 if (priv->plat->has_gmac ||
758                                     priv->plat->has_gmac4) {
759                                         ctrl |= priv->hw->link.port;
760                                         ctrl |= priv->hw->link.speed;
761                                 } else {
762                                         ctrl &= ~priv->hw->link.port;
763                                 }
764                                 break;
765                         case 10:
766                                 if (priv->plat->has_gmac ||
767                                     priv->plat->has_gmac4) {
768                                         ctrl |= priv->hw->link.port;
769                                         ctrl &= ~(priv->hw->link.speed);
770                                 } else {
771                                         ctrl &= ~priv->hw->link.port;
772                                 }
773                                 break;
774                         default:
775                                 netif_warn(priv, link, priv->dev,
776                                            "broken speed: %d\n", phydev->speed);
777                                 phydev->speed = SPEED_UNKNOWN;
778                                 break;
779                         }
780                         if (phydev->speed != SPEED_UNKNOWN)
781                                 stmmac_hw_fix_mac_speed(priv);
782                         priv->speed = phydev->speed;
783                 }
784
785                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
786
787                 if (!priv->oldlink) {
788                         new_state = 1;
789                         priv->oldlink = 1;
790                 }
791         } else if (priv->oldlink) {
792                 new_state = 1;
793                 priv->oldlink = 0;
794                 priv->speed = SPEED_UNKNOWN;
795                 priv->oldduplex = DUPLEX_UNKNOWN;
796         }
797
798         if (new_state && netif_msg_link(priv))
799                 phy_print_status(phydev);
800
801         spin_unlock_irqrestore(&priv->lock, flags);
802
803         if (phydev->is_pseudo_fixed_link)
804                 /* Stop the PHY layer from calling the adjust_link hook
805                  * when a switch is attached to the stmmac driver.
806                  */
807                 phydev->irq = PHY_IGNORE_INTERRUPT;
808         else
809                 /* At this stage, init the EEE if supported.
810                  * Never called in case of fixed_link.
811                  */
812                 priv->eee_enabled = stmmac_eee_init(priv);
813 }
814
815 /**
816  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
817  * @priv: driver private structure
818  * Description: this verifies whether the HW supports the Physical Coding
819  * Sublayer (PCS) interface, which can be used when the MAC is
820  * configured for the TBI, RTBI, or SGMII PHY interface.
821  */
822 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
823 {
824         int interface = priv->plat->interface;
825
826         if (priv->dma_cap.pcs) {
827                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
828                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
829                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
830                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
831                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
832                         priv->hw->pcs = STMMAC_PCS_RGMII;
833                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
834                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
835                         priv->hw->pcs = STMMAC_PCS_SGMII;
836                 }
837         }
838 }
839
840 /**
841  * stmmac_init_phy - PHY initialization
842  * @dev: net device structure
843  * Description: it initializes the driver's PHY state, and attaches the PHY
844  * to the mac driver.
845  *  Return value:
846  *  0 on success
847  */
848 static int stmmac_init_phy(struct net_device *dev)
849 {
850         struct stmmac_priv *priv = netdev_priv(dev);
851         struct phy_device *phydev;
852         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
853         char bus_id[MII_BUS_ID_SIZE];
854         int interface = priv->plat->interface;
855         int max_speed = priv->plat->max_speed;
856         priv->oldlink = 0;
857         priv->speed = SPEED_UNKNOWN;
858         priv->oldduplex = DUPLEX_UNKNOWN;
859
860         if (priv->plat->phy_node) {
861                 phydev = of_phy_connect(dev, priv->plat->phy_node,
862                                         &stmmac_adjust_link, 0, interface);
863         } else {
864                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
865                          priv->plat->bus_id);
866
867                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
868                          priv->plat->phy_addr);
869                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
870                            phy_id_fmt);
871
872                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
873                                      interface);
874         }
875
876         if (IS_ERR_OR_NULL(phydev)) {
877                 netdev_err(priv->dev, "Could not attach to PHY\n");
878                 if (!phydev)
879                         return -ENODEV;
880
881                 return PTR_ERR(phydev);
882         }
883
884         /* Stop Advertising 1000BASE Capability if interface is not GMII */
885         if ((interface == PHY_INTERFACE_MODE_MII) ||
886             (interface == PHY_INTERFACE_MODE_RMII) ||
887                 (max_speed < 1000 && max_speed > 0))
888                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
889                                          SUPPORTED_1000baseT_Full);
890
891         /*
892          * Broken HW is sometimes missing the pull-up resistor on the
893          * MDIO line, which results in reads to non-existent devices returning
894          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
895          * device as well.
896          * Note: phydev->phy_id is the result of reading the UID PHY registers.
897          */
898         if (!priv->plat->phy_node && phydev->phy_id == 0) {
899                 phy_disconnect(phydev);
900                 return -ENODEV;
901         }
902
903         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
904          * subsequent PHY polling, make sure we force a link transition if
905          * we have a UP/DOWN/UP transition
906          */
907         if (phydev->is_pseudo_fixed_link)
908                 phydev->irq = PHY_POLL;
909
910         phy_attached_info(phydev);
911         return 0;
912 }
913
914 static void stmmac_display_rings(struct stmmac_priv *priv)
915 {
916         u32 rx_cnt = priv->plat->rx_queues_to_use;
917         u32 tx_cnt = priv->plat->tx_queues_to_use;
918         void *head_rx, *head_tx;
919         u32 queue;
920
921         /* Display RX rings */
922         for (queue = 0; queue < rx_cnt; queue++) {
923                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
924
925                 pr_info("\tRX Queue %d rings\n", queue);
926
927                 if (priv->extend_desc)
928                         head_rx = (void *)rx_q->dma_erx;
929                 else
930                         head_rx = (void *)rx_q->dma_rx;
931
932                 /* Display Rx ring */
933                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
934         }
935
936         /* Display TX rings */
937         for (queue = 0; queue < tx_cnt; queue++) {
938                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
939
940                 pr_info("\tTX Queue %d rings\n", queue);
941
942                 if (priv->extend_desc)
943                         head_tx = (void *)tx_q->dma_etx;
944                 else
945                         head_tx = (void *)tx_q->dma_tx;
946
947                 /* Display Tx ring */
948                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
949         }
950 }
951
952 static int stmmac_set_bfsize(int mtu, int bufsize)
953 {
954         int ret = bufsize;
955
956         if (mtu >= BUF_SIZE_4KiB)
957                 ret = BUF_SIZE_8KiB;
958         else if (mtu >= BUF_SIZE_2KiB)
959                 ret = BUF_SIZE_4KiB;
960         else if (mtu > DEFAULT_BUFSIZE)
961                 ret = BUF_SIZE_2KiB;
962         else
963                 ret = DEFAULT_BUFSIZE;
964
965         return ret;
966 }
967
968 /**
969  * stmmac_clear_rx_descriptors - clear the descriptors of a RX queue
970  * @priv: driver private structure
971  * @queue: RX queue index
972  * Description: this function is called to clear the RX descriptors;
973  * it handles both basic and extended descriptors.
974  */
975 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
976 {
977         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
978         u32 i = 0;
979
980         /* Clear the RX descriptors */
981         for (i = 0; i < DMA_RX_SIZE; i++)
982                 if (priv->extend_desc)
983                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
984                                                      priv->use_riwt, priv->mode,
985                                                      (i == DMA_RX_SIZE - 1));
986                 else
987                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
988                                                      priv->use_riwt, priv->mode,
989                                                      (i == DMA_RX_SIZE - 1));
990 }
991
992 /**
993  * stmmac_clear_tx_descriptors - clear the descriptors of a TX queue
994  * @priv: driver private structure
995  * @queue: TX queue index
996  * Description: this function is called to clear the TX descriptors;
997  * it handles both basic and extended descriptors.
998  */
999 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1000 {
1001         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1002         u32 i = 0;
1003
1004         /* Clear the TX descriptors */
1005         for (i = 0; i < DMA_TX_SIZE; i++)
1006                 if (priv->extend_desc)
1007                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1008                                                      priv->mode,
1009                                                      (i == DMA_TX_SIZE - 1));
1010                 else
1011                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1012                                                      priv->mode,
1013                                                      (i == DMA_TX_SIZE - 1));
1014 }
1015
1016 /**
1017  * stmmac_clear_descriptors - clear descriptors
1018  * @priv: driver private structure
1019  * Description: this function is called to clear the tx and rx descriptors;
1020  * it handles both basic and extended descriptors.
1021  */
1022 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1023 {
1024         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1025         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1026         u32 queue;
1027
1028         for (queue = 0; queue < rx_queue_cnt; queue++)
1029                 stmmac_clear_rx_descriptors(priv, queue);
1030
1031         for (queue = 0; queue < tx_queue_cnt; queue++)
1032                 stmmac_clear_tx_descriptors(priv, queue);
1033 }
1034
1035 /**
1036  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1037  * @priv: driver private structure
1038  * @p: descriptor pointer
1039  * @i: descriptor index
1040  * @flags: gfp flag.
1041  * @queue: RX queue index
1042  * Description: this function is called to allocate a receive buffer, perform
1043  * the DMA mapping and init the descriptor.
1044  */
1045 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1046                                   int i, gfp_t flags, u32 queue)
1047 {
1048         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1049         struct sk_buff *skb;
1050
1051         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1052         if (!skb) {
1053                 netdev_err(priv->dev,
1054                            "%s: Rx init fails; skb is NULL\n", __func__);
1055                 return -ENOMEM;
1056         }
1057         rx_q->rx_skbuff[i] = skb;
1058         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1059                                                 priv->dma_buf_sz,
1060                                                 DMA_FROM_DEVICE);
1061         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1062                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1063                 dev_kfree_skb_any(skb);
1064                 return -EINVAL;
1065         }
1066
1067         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1068                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1069         else
1070                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
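             /* DWMAC4 and newer cores take the RX buffer address in des0,
              * while older cores take it in des2 (selected above).
              */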
1071
1072         if ((priv->hw->mode->init_desc3) &&
1073             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1074                 priv->hw->mode->init_desc3(p);
1075
1076         return 0;
1077 }
1078
1079 /**
1080  * stmmac_free_rx_buffers - free RX buffers.
1081  * @priv: driver private structure
1082  * @queue: RX queue index
1083  * @i: buffer index
1084  */
1085 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, u32 queue, int i)
1086 {
1087         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1088
1089         if (rx_q->rx_skbuff[i]) {
1090                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1091                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1092                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1093         }
1094         rx_q->rx_skbuff[i] = NULL;
1095 }
1096
1097 /**
1098  * stmmac_free_tx_buffers - free TX buffers.
1099  * @priv: driver private structure
1100  * @queue: TX queue index
1101  * @i: buffer index
1102  */
1103 static void stmmac_free_tx_buffers(struct stmmac_priv *priv, u32 queue, u32 i)
1104 {
1105         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1106
1107         if (tx_q->tx_skbuff_dma[i].buf) {
1108                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1109                         dma_unmap_page(priv->device,
1110                                        tx_q->tx_skbuff_dma[i].buf,
1111                                        tx_q->tx_skbuff_dma[i].len,
1112                                        DMA_TO_DEVICE);
1113                 else
1114                         dma_unmap_single(priv->device,
1115                                          tx_q->tx_skbuff_dma[i].buf,
1116                                          tx_q->tx_skbuff_dma[i].len,
1117                                          DMA_TO_DEVICE);
1118         }
1119
1120         if (tx_q->tx_skbuff[i]) {
1121                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1122                 tx_q->tx_skbuff[i] = NULL;
1123                 tx_q->tx_skbuff_dma[i].buf = 0;
1124                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1125         }
1126 }
1127
1128 /**
1129  * init_tx_dma_desc_rings - init the TX descriptor rings
1130  * @dev: net device structure
1131  * Description: this function initializes the DMA TX descriptors
1132  * and the related bookkeeping. It supports both the chained and
1133  * ring modes.
1134  */
1135 static int init_tx_dma_desc_rings(struct net_device *dev)
1136 {
1137         struct stmmac_priv *priv = netdev_priv(dev);
1138         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1139         u32 queue;
1140         int i = 0;
1141
1142         for (queue = 0; queue < tx_queue_cnt; queue++) {
1143                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1144
1145                 netif_dbg(priv, probe, priv->dev,
1146                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1147                           (u32)tx_q->dma_tx_phy);
1148
1149                 /* Setup the chained descriptor addresses */
1150                 if (priv->mode == STMMAC_CHAIN_MODE) {
1151                         if (priv->extend_desc)
1152                                 priv->hw->mode->init(tx_q->dma_etx,
1153                                                      tx_q->dma_tx_phy,
1154                                                      DMA_TX_SIZE, 1);
1155                         else
1156                                 priv->hw->mode->init(tx_q->dma_tx,
1157                                                      tx_q->dma_tx_phy,
1158                                                      DMA_TX_SIZE, 0);
1159                 }
1160
1161                 for (i = 0; i < DMA_TX_SIZE; i++) {
1162                         struct dma_desc *p;
1163
1164                         if (priv->extend_desc)
1165                                 p = &((tx_q->dma_etx + i)->basic);
1166                         else
1167                                 p = tx_q->dma_tx + i;
1168
1169                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1170                                 p->des0 = 0;
1171                                 p->des1 = 0;
1172                                 p->des2 = 0;
1173                                 p->des3 = 0;
1174                         } else {
1175                                 p->des2 = 0;
1176                         }
1177
1178                         tx_q->tx_skbuff_dma[i].buf = 0;
1179                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1180                         tx_q->tx_skbuff_dma[i].len = 0;
1181                         tx_q->tx_skbuff_dma[i].last_segment = false;
1182                         tx_q->tx_skbuff[i] = NULL;
1183                 }
1184
1185                 tx_q->dirty_tx = 0;
1186                 tx_q->cur_tx = 0;
1187                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1188         }
1189
1190         return 0;
1191 }
1192
1193 /**
1194  * init_rx_dma_desc_rings - init the RX descriptor rings
1195  * @dev: net device structure
1196  * @flags: gfp flag.
1197  * Description: this function initializes the DMA RX descriptors
1198  * and allocates the socket buffers. It supports both the chained and ring
1199  * modes.
1200  */
1201 static int init_rx_dma_desc_rings(struct net_device *dev, gfp_t flags)
1202 {
1203         struct stmmac_priv *priv = netdev_priv(dev);
1204         u32 rx_count = priv->plat->rx_queues_to_use;
1205         unsigned int bfsize = 0;
1206         int ret = -ENOMEM;
1207         u32 queue;
1208         int i;
1209
1210         if (priv->hw->mode->set_16kib_bfsize)
1211                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1212
1213         if (bfsize < BUF_SIZE_16KiB)
1214                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1215
1216         priv->dma_buf_sz = bfsize;
1217
1218         /* RX INITIALIZATION */
1219         netif_dbg(priv, probe, priv->dev,
1220                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1221
1222         for (queue = 0; queue < rx_count; queue++) {
1223                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1224
1225                 netif_dbg(priv, probe, priv->dev,
1226                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1227                           (u32)rx_q->dma_rx_phy);
1228
1229                 for (i = 0; i < DMA_RX_SIZE; i++) {
1230                         struct dma_desc *p;
1231
1232                         if (priv->extend_desc)
1233                                 p = &((rx_q->dma_erx + i)->basic);
1234                         else
1235                                 p = rx_q->dma_rx + i;
1236
1237                         ret = stmmac_init_rx_buffers(priv, p, i, flags, queue);
1238                         if (ret)
1239                                 goto err_init_rx_buffers;
1240
1241                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1242                                   rx_q->rx_skbuff[i],
1243                                   rx_q->rx_skbuff[i]->data,
1244                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1245                 }
1246
1247                 rx_q->cur_rx = 0;
1248                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
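                 /* i equals DMA_RX_SIZE here, so dirty_rx starts at 0: all
                  * descriptors are armed and owned by the DMA at this point.
                  */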
1249
1250                 stmmac_clear_rx_descriptors(priv, queue);
1251
1252                 if (priv->mode == STMMAC_CHAIN_MODE) {
1253                         if (priv->extend_desc)
1254                                 priv->hw->mode->init(rx_q->dma_erx,
1255                                                      rx_q->dma_rx_phy,
1256                                                      DMA_RX_SIZE, 1);
1257                         else
1258                                 priv->hw->mode->init(rx_q->dma_rx,
1259                                                      rx_q->dma_rx_phy,
1260                                                      DMA_RX_SIZE, 0);
1261                 }
1262         }
1263
1264         buf_sz = bfsize;
1265
1266         return 0;
1267
1268 err_init_rx_buffers:
1269         do {
1270                 while (--i >= 0)
1271                         stmmac_free_rx_buffers(priv, queue, i);
1272
1273                 i = DMA_RX_SIZE;
1274         } while (queue-- > 0);
1275
1276         return ret;
1277 }
1278
1279 /**
1280  * init_dma_desc_rings - init the RX/TX descriptor rings
1281  * @dev: net device structure
1282  * @flags: gfp flag.
1283  * Description: this function initializes the DMA RX/TX descriptors
1284  * and allocates the socket buffers. It supports both the chained and ring
1285  * modes.
1286  */
1287 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1288 {
1289         struct stmmac_priv *priv = netdev_priv(dev);
1290         int ret = init_rx_dma_desc_rings(dev, flags);
1291
1292         if (ret)
1293                 return ret;
1294
1295         ret = init_tx_dma_desc_rings(dev);
1296
1297         if (netif_msg_hw(priv))
1298                 stmmac_display_rings(priv);
1299
1300         return ret;
1301 }
1302
1303 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1304 {
1305         int i;
1306
1307         for (i = 0; i < DMA_RX_SIZE; i++)
1308                 stmmac_free_rx_buffers(priv, queue, i);
1309 }
1310
1311 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1312 {
1313         int i;
1314
1315         for (i = 0; i < DMA_TX_SIZE; i++)
1316                 stmmac_free_tx_buffers(priv, queue, i);
1317 }
1318
1319 /**
1320  * free_rx_dma_desc_resources - free RX DMA resources
1321  * @priv: driver private structure
1322  */
1323 static void free_rx_dma_desc_resources(struct stmmac_priv *priv)
1324 {
1325         u32 rx_count = priv->plat->rx_queues_to_use;
1326         u32 queue = 0;
1327
1328         if (!priv->rx_queue)
1329                 return;
1330
1331         /* Free RX queue resources */
1332         for (queue = 0; queue < rx_count; queue++) {
1333                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1334
1335                 if (!rx_q)
1336                         break;
1337
1338                 /* Release the DMA RX socket buffers */
1339                 dma_free_rx_skbufs(priv, queue);
1340
1341                 kfree(rx_q->rx_skbuff);
1342
1343                 kfree(rx_q->rx_skbuff_dma);
1344
1345                 if (!priv->extend_desc)
1346                         dma_free_coherent(priv->device,
1347                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1348                                           rx_q->dma_rx,
1349                                           rx_q->dma_rx_phy);
1350                 else
1351                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1352                                           sizeof(struct dma_extended_desc),
1353                                           rx_q->dma_erx,
1354                                           rx_q->dma_rx_phy);
1355         }
1356
1357         kfree(priv->rx_queue);
1358 }
1359
1360 /**
1361  * free_tx_dma_desc_resources - free TX DMA resources
1362  * @priv: driver private structure
1363  */
1364 static void free_tx_dma_desc_resources(struct stmmac_priv *priv)
1365 {
1366         u32 tx_count = priv->plat->tx_queues_to_use;
1367         u32 queue = 0;
1368
1369         if (!priv->tx_queue)
1370                 return;
1371
1372         /* Free TX queue resources */
1373         for (queue = 0; queue < tx_count; queue++) {
1374                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1375
1376                 if (!tx_q)
1377                         break;
1378
1379                 /* Release the DMA TX socket buffers */
1380                 dma_free_tx_skbufs(priv, queue);
1381
1382                 kfree(tx_q->tx_skbuff);
1383
1384                 kfree(tx_q->tx_skbuff_dma);
1385
1386                 if (!priv->extend_desc)
1387                         dma_free_coherent(priv->device,
1388                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1389                                           tx_q->dma_tx,
1390                                           tx_q->dma_tx_phy);
1391                 else
1392                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1393                                           sizeof(struct dma_extended_desc),
1394                                           tx_q->dma_etx,
1395                                           tx_q->dma_tx_phy);
1396         }
1397
1398         kfree(priv->tx_queue);
1399 }
1400
1401 /**
1402  * free_dma_desc_resources - free All DMA resources
1403  * @priv: driver private structure
1404  */
1405 static void free_dma_desc_resources(struct stmmac_priv *priv)
1406 {
1407         free_rx_dma_desc_resources(priv);
1408         free_tx_dma_desc_resources(priv);
1409 }
1410
1411 /**
1412  * alloc_rx_dma_desc_resources - alloc RX resources.
1413  * @priv: private structure
1414  * Description: according to which descriptor can be used (extend or basic)
1415  * this function allocates the resources for RX paths. It pre-allocates the
1416  * RX socket buffers in order to allow the zero-copy mechanism.
1417  */
1418 static int alloc_rx_dma_desc_resources(struct stmmac_priv *priv)
1419 {
1420         u32 rx_count = priv->plat->rx_queues_to_use;
1421         int ret = -ENOMEM;
1422         u32 queue = 0;
1423
1424         /* Allocate RX queues array */
1425         priv->rx_queue = kmalloc_array(rx_count,
1426                                        sizeof(struct stmmac_rx_queue),
1427                                        GFP_KERNEL);
1428         if (!priv->rx_queue)
1429                 return -ENOMEM;
1432
1433         /* RX queues buffers and DMA */
1434         for (queue = 0; queue < rx_count; queue++) {
1435                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1436
1437                 rx_q->queue_index = queue;
1438                 rx_q->priv_data = priv;
1439
1440                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1441                                                         sizeof(dma_addr_t),
1442                                                         GFP_KERNEL);
1443                 if (!rx_q->rx_skbuff_dma)
1444                         goto err_dma_buffers;
1445
1446                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1447                                                     sizeof(struct sk_buff *),
1448                                                     GFP_KERNEL);
1449                 if (!rx_q->rx_skbuff)
1450                         goto err_dma_buffers;
1451
1452                 if (priv->extend_desc) {
1453                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1454                         (DMA_RX_SIZE * sizeof(struct dma_extended_desc)),
1455                         &rx_q->dma_rx_phy, GFP_KERNEL);
1456
1457                         if (!rx_q->dma_erx)
1458                                 goto err_dma_buffers;
1459                 } else {
1460                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1461                         (DMA_RX_SIZE * sizeof(struct dma_desc)),
1462                         &rx_q->dma_rx_phy, GFP_KERNEL);
1463
1464                         if (!rx_q->dma_rx)
1465                                 goto err_dma_buffers;
1466                 }
1467         }
1468
1469         return 0;
1470
1471 err_dma_buffers:
1472         free_rx_dma_desc_resources(priv);
1473
1474         return ret;
1475 }
1476
1477 /**
1478  * alloc_tx_dma_desc_resources - alloc TX resources.
1479  * @priv: private structure
1480  * Description: according to which descriptor can be used (extend or basic)
1481  * this function allocates the resources for TX paths.
1482  */
1483 static int alloc_tx_dma_desc_resources(struct stmmac_priv *priv)
1484 {
1485         u32 tx_count = priv->plat->tx_queues_to_use;
1486         int ret = -ENOMEM;
1487         u32 queue = 0;
1488
1489         /* Allocate TX queues array */
1490         priv->tx_queue = kmalloc_array(tx_count,
1491                                        sizeof(struct stmmac_tx_queue),
1492                                        GFP_KERNEL);
1493         if (!priv->tx_queue)
1494                 return -ENOMEM;
1495
1496         /* TX queues buffers and DMA */
1497         for (queue = 0; queue < tx_count; queue++) {
1498                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1499
1500                 tx_q->queue_index = queue;
1501                 tx_q->priv_data = priv;
1502
1503                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1504                                           sizeof(struct stmmac_tx_info),
1505                                           GFP_KERNEL);
1506
1507                 if (!tx_q->tx_skbuff_dma)
1508                         goto err_dma_buffers;
1509
1510                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1511                                                     sizeof(struct sk_buff *),
1512                                                     GFP_KERNEL);
1513                 if (!tx_q->tx_skbuff)
1514                         goto err_dma_buffers;
1515
1516                 if (priv->extend_desc) {
1517                         tx_q->dma_etx =
1518                         dma_zalloc_coherent(priv->device,
1519                         (DMA_TX_SIZE * sizeof(struct dma_extended_desc)),
1520                         &tx_q->dma_tx_phy, GFP_KERNEL);
1521
1522                         if (!tx_q->dma_etx)
1523                                 goto err_dma_buffers;
1524                 } else {
1525                         tx_q->dma_tx =
1526                         dma_zalloc_coherent(priv->device,
1527                         (DMA_TX_SIZE * sizeof(struct dma_desc)),
1528                         &tx_q->dma_tx_phy, GFP_KERNEL);
1529
1530                         if (!tx_q->dma_tx)
1531                                 goto err_dma_buffers;
1532                 }
1533         }
1534
1535         return 0;
1536
1537 err_dma_buffers:
1538         free_tx_dma_desc_resources(priv);
1539
1540         return ret;
1541 }
1542
1543 /**
1544  * alloc_dma_desc_resources - alloc TX/RX resources.
1545  * @priv: private structure
1546  * Description: according to which descriptor can be used (extend or basic)
1547  * this function allocates the resources for TX and RX paths. In case of
1548  * reception, for example, it pre-allocates the RX socket buffers in order
1549  * to allow the zero-copy mechanism.
1550  */
1551 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1552 {
1553         int ret = 0;
1554
1555         ret = alloc_tx_dma_desc_resources(priv);
1556         if (ret)
1557                 return ret;
1558
1559         ret = alloc_rx_dma_desc_resources(priv);
1560
1561         return ret;
1562 }
1563
1564 /**
1565  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1566  *  @priv: driver private structure
1567  *  Description: It is used for enabling the rx queues in the MAC
1568  */
1569 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1570 {
1571         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1572         int queue;
1573         u8 mode;
1574
1575         for (queue = 0; queue < rx_queues_count; queue++) {
1576                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1577                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1578         }
1579 }
1580
1581 /**
1582  * stmmac_start_rx_dma - start RX DMA channel
1583  * @priv: driver private structure
1584  * @chan: RX channel index
1585  * Description:
1586  * This starts a RX DMA channel
1587  */
1588 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1589 {
1590         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1591         priv->hw->dma->start_rx(priv->ioaddr, chan);
1592 }
1593
1594 /**
1595  * stmmac_start_tx_dma - start TX DMA channel
1596  * @priv: driver private structure
1597  * @chan: TX channel index
1598  * Description:
1599  * This starts a TX DMA channel
1600  */
1601 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1602 {
1603         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1604         priv->hw->dma->start_tx(priv->ioaddr, chan);
1605 }
1606
1607 /**
1608  * stmmac_stop_rx_dma - stop RX DMA channel
1609  * @priv: driver private structure
1610  * @chan: RX channel index
1611  * Description:
1612  * This stops a RX DMA channel
1613  */
1614 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1615 {
1616         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1617         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1618 }
1619
1620 /**
1621  * stmmac_stop_tx_dma - stop TX DMA channel
1622  * @priv: driver private structure
1623  * @chan: TX channel index
1624  * Description:
1625  * This stops a TX DMA channel
1626  */
1627 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1628 {
1629         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1630         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1631 }
1632
1633 /**
1634  * stmmac_start_all_dma - start all RX and TX DMA channels
1635  * @priv: driver private structure
1636  * Description:
1637  * This starts all the RX and TX DMA channels
1638  */
1639 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1640 {
1641         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1642         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1643         u32 chan = 0;
1644
1645         for (chan = 0; chan < rx_channels_count; chan++)
1646                 stmmac_start_rx_dma(priv, chan);
1647
1648         for (chan = 0; chan < tx_channels_count; chan++)
1649                 stmmac_start_tx_dma(priv, chan);
1650 }
1651
1652 /**
1653  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1654  * @priv: driver private structure
1655  * Description:
1656  * This stops the RX and TX DMA channels
1657  */
1658 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1659 {
1660         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1661         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1662         u32 chan = 0;
1663
1664         for (chan = 0; chan < rx_channels_count; chan++)
1665                 stmmac_stop_rx_dma(priv, chan);
1666
1667         for (chan = 0; chan < tx_channels_count; chan++)
1668                 stmmac_stop_tx_dma(priv, chan);
1669 }
1670
1671 /**
1672  *  stmmac_dma_operation_mode - HW DMA operation mode
1673  *  @priv: driver private structure
1674  *  Description: it is used for configuring the DMA operation mode register in
1675  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1676  */
1677 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1678 {
1679         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1680         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1681         int rxfifosz = priv->plat->rx_fifo_size;
1682         u32 txmode = 0;
1683         u32 rxmode = 0;
1684         u32 chan = 0;
1685
1686         if (rxfifosz == 0)
1687                 rxfifosz = priv->dma_cap.rx_fifo_size;
1688
1689         if (priv->plat->force_thresh_dma_mode) {
1690                 txmode = tc;
1691                 rxmode = tc;
1692         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1693                 /*
1694                  * In case of GMAC, SF mode can be enabled
1695                  * to perform the TX COE in HW. This depends on:
1696                  * 1) TX COE being actually supported;
1697                  * 2) there being no buggy Jumbo frame support
1698                  *    that requires skipping csum insertion in the TDES.
1699                  */
1700                 txmode = SF_DMA_MODE;
1701                 rxmode = SF_DMA_MODE;
1702                 priv->xstats.threshold = SF_DMA_MODE;
1703         } else {
1704                 txmode = tc;
1705                 rxmode = SF_DMA_MODE;
1706         }
1707
1708         /* configure all channels */
1709         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1710                 for (chan = 0; chan < rx_channels_count; chan++)
1711                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1712                                                    rxfifosz);
1713
1714                 for (chan = 0; chan < tx_channels_count; chan++)
1715                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1716         } else {
1717                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1718                                         rxfifosz);
1719         }
1720 }
1721
1722 /**
1723  * stmmac_tx_clean - to manage the transmission completion
1724  * @priv: driver private structure
1725  * @queue: TX queue index
1726  * Description: it reclaims the transmit resources after transmission completes.
1727  */
1728 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1729 {
1730         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1731         unsigned int bytes_compl = 0, pkts_compl = 0;
1732         unsigned int entry = tx_q->dirty_tx;
1733
1734         netif_tx_lock(priv->dev);
1735
1736         priv->xstats.tx_clean++;
1737
1738         while (entry != tx_q->cur_tx) {
1739                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1740                 struct dma_desc *p;
1741                 int status;
1742
1743                 if (priv->extend_desc)
1744                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1745                 else
1746                         p = tx_q->dma_tx + entry;
1747
1748                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1749                                                       &priv->xstats, p,
1750                                                       priv->ioaddr);
1751                 /* Check if the descriptor is owned by the DMA */
1752                 if (unlikely(status & tx_dma_own))
1753                         break;
1754
1755                 /* Just consider the last segment and ...*/
1756                 if (likely(!(status & tx_not_ls))) {
1757                         /* ... verify the status error condition */
1758                         if (unlikely(status & tx_err)) {
1759                                 priv->dev->stats.tx_errors++;
1760                         } else {
1761                                 priv->dev->stats.tx_packets++;
1762                                 priv->xstats.tx_pkt_n++;
1763                         }
1764                         stmmac_get_tx_hwtstamp(priv, p, skb);
1765                 }
1766
1767                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1768                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1769                                 dma_unmap_page(priv->device,
1770                                                tx_q->tx_skbuff_dma[entry].buf,
1771                                                tx_q->tx_skbuff_dma[entry].len,
1772                                                DMA_TO_DEVICE);
1773                         else
1774                                 dma_unmap_single(priv->device,
1775                                                  tx_q->tx_skbuff_dma[entry].buf,
1776                                                  tx_q->tx_skbuff_dma[entry].len,
1777                                                  DMA_TO_DEVICE);
1778                         tx_q->tx_skbuff_dma[entry].buf = 0;
1779                         tx_q->tx_skbuff_dma[entry].len = 0;
1780                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1781                 }
1782
1783                 if (priv->hw->mode->clean_desc3)
1784                         priv->hw->mode->clean_desc3(tx_q, p);
1785
1786                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1787                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1788
1789                 if (likely(skb != NULL)) {
1790                         pkts_compl++;
1791                         bytes_compl += skb->len;
1792                         dev_consume_skb_any(skb);
1793                         tx_q->tx_skbuff[entry] = NULL;
1794                 }
1795
1796                 priv->hw->desc->release_tx_desc(p, priv->mode);
1797
1798                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1799         }
1800         tx_q->dirty_tx = entry;
1801
1802         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1803                                   pkts_compl, bytes_compl);
1804
1805         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1806                                                                queue))) &&
1807             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1808                 netif_dbg(priv, tx_done, priv->dev,
1809                           "%s: restart transmit\n", __func__);
1810                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1811         }
1812
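             /* If EEE is enabled and the TX path is idle again, re-enter LPI mode */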
1813         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1814                 stmmac_enable_eee_mode(priv);
1815                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1816         }
1817         netif_tx_unlock(priv->dev);
1818 }
1819
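     /**
      * stmmac_enable_dma_irq - enable the DMA IRQ on a channel
      * @priv: driver private structure
      * @chan: channel index
      */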
1820 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1821 {
1822         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1823 }
1824
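     /**
      * stmmac_disable_dma_irq - disable the DMA IRQ on a channel
      * @priv: driver private structure
      * @chan: channel index
      */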
1825 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1826 {
1827         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1828 }
1829
1830 /**
1831  * stmmac_tx_err - to manage the tx error
1832  * @priv: driver private structure
1833  * @queue: queue index
1834  * Description: it cleans the descriptors and restarts the transmission
1835  * in case of transmission errors.
1836  */
1837 static void stmmac_tx_err(struct stmmac_priv *priv, u32 queue)
1838 {
1839         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1840         u32 chan = queue;
1841         int i;
1842
1843         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
1844
1845         stmmac_stop_tx_dma(priv, chan);
1846         dma_free_tx_skbufs(priv, queue);
1847         for (i = 0; i < DMA_TX_SIZE; i++)
1848                 if (priv->extend_desc)
1849                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1850                                                      priv->mode,
1851                                                      (i == DMA_TX_SIZE - 1));
1852                 else
1853                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1854                                                      priv->mode,
1855                                                      (i == DMA_TX_SIZE - 1));
1856         tx_q->dirty_tx = 0;
1857         tx_q->cur_tx = 0;
1858         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1859         stmmac_start_tx_dma(priv, chan);
1860
1861         priv->dev->stats.tx_errors++;
1862         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1863 }
1864
1865 /**
1866  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1867  *  @priv: driver private structure
1868  *  @txmode: TX operating mode
1869  *  @rxmode: RX operating mode
1870  *  @chan: channel index
1871  *  Description: it is used for configuring the DMA operation mode at
1872  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1873  *  mode.
1874  */
1875 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1876                                           u32 rxmode, u32 chan)
1877 {
1878         int rxfifosz = priv->plat->rx_fifo_size;
1879
1880         if (rxfifosz == 0)
1881                 rxfifosz = priv->dma_cap.rx_fifo_size;
1882
1883         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1884                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1885                                            rxfifosz);
1886                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1887         } else {
1888                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1889                                         rxfifosz);
1890         }
1891 }
1892
1893 /**
1894  * stmmac_dma_interrupt - DMA ISR
1895  * @priv: driver private structure
1896  * Description: this is the DMA ISR. It is called by the main ISR.
1897  * It calls the dwmac dma routine and schedules the poll method when some
1898  * work can be done.
1899  */
1900 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1901 {
1902         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1903         int status;
1904         u32 chan;
1905
1906         for (chan = 0; chan < tx_channel_count; chan++) {
1907                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1908
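                     /* Both handle_rx and handle_tx schedule the RX queue's NAPI instance */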
1909                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1910                                                       &priv->xstats, chan);
1911                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1912                         if (likely(napi_schedule_prep(&rx_q->napi))) {
1913                                 stmmac_disable_dma_irq(priv, chan);
1914                                 __napi_schedule(&rx_q->napi);
1915                         }
1916                 }
1917
1918                 if (unlikely(status & tx_hard_error_bump_tc)) {
1919                         /* Try to bump up the dma threshold on this failure */
1920                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1921                             (tc <= 256)) {
1922                                 tc += 64;
1923                                 if (priv->plat->force_thresh_dma_mode)
1924                                         stmmac_set_dma_operation_mode(priv,
1925                                                                       tc,
1926                                                                       tc,
1927                                                                       chan);
1928                                 else
1929                                         stmmac_set_dma_operation_mode(priv,
1930                                                                     tc,
1931                                                                     SF_DMA_MODE,
1932                                                                     chan);
1933                                 priv->xstats.threshold = tc;
1934                         }
1935                 } else if (unlikely(status == tx_hard_error)) {
1936                         stmmac_tx_err(priv, chan);
1937                 }
1938         }
1939 }
1940
1941 /**
1942  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1943  * @priv: driver private structure
1944  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
1945  */
1946 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1947 {
1948         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1949                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1950
1951         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1952                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1953                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1954         } else {
1955                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1956                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1957         }
1958
1959         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1960
1961         if (priv->dma_cap.rmon) {
1962                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1963                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1964         } else
1965                 netdev_info(priv->dev, "No MAC Management Counters available\n");
1966 }
1967
1968 /**
1969  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
1970  * @priv: driver private structure
1971  * Description: select the Enhanced/Alternate or Normal descriptors.
1972  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1973  * supported by the HW capability register.
1974  */
1975 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1976 {
1977         if (priv->plat->enh_desc) {
1978                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1979
1980                 /* GMAC older than 3.50 has no extended descriptors */
1981                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1982                         dev_info(priv->device, "Enabled extended descriptors\n");
1983                         priv->extend_desc = 1;
1984                 } else
1985                         dev_warn(priv->device, "Extended descriptors not supported\n");
1986
1987                 priv->hw->desc = &enh_desc_ops;
1988         } else {
1989                 dev_info(priv->device, "Normal descriptors\n");
1990                 priv->hw->desc = &ndesc_ops;
1991         }
1992 }
1993
1994 /**
1995  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1996  * @priv: driver private structure
1997  * Description:
1998  *  new GMAC chip generations have a dedicated register to indicate the
1999  *  presence of the optional features/functions.
2000  *  This can also be used to override the values passed through the
2001  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2002  */
2003 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2004 {
2005         u32 ret = 0;
2006
2007         if (priv->hw->dma->get_hw_feature) {
2008                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2009                                               &priv->dma_cap);
2010                 ret = 1;
2011         }
2012
2013         return ret;
2014 }
2015
2016 /**
2017  * stmmac_check_ether_addr - check if the MAC addr is valid
2018  * @priv: driver private structure
2019  * Description:
2020  * it verifies that the MAC address is valid; in case of failure it
2021  * generates a random MAC address.
2022  */
2023 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2024 {
2025         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2026                 priv->hw->mac->get_umac_addr(priv->hw,
2027                                              priv->dev->dev_addr, 0);
2028                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2029                         eth_hw_addr_random(priv->dev);
2030                 netdev_info(priv->dev, "device MAC address %pM\n",
2031                             priv->dev->dev_addr);
2032         }
2033 }
2034
2035 /**
2036  * stmmac_init_dma_engine - DMA init.
2037  * @priv: driver private structure
2038  * Description:
2039  * It inits the DMA invoking the specific MAC/GMAC callback.
2040  * Some DMA parameters can be passed from the platform;
2041  * if these are not passed, a default is kept for the MAC or GMAC.
2042  */
2043 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2044 {
2045         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2046         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2047         struct stmmac_rx_queue *rx_q;
2048         struct stmmac_tx_queue *tx_q;
2049         u32 dummy_dma_rx_phy = 0;
2050         u32 dummy_dma_tx_phy = 0;
2051         u32 chan = 0;
2052         int atds = 0;
2053         int ret = 0;
2054
2055         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2056                 dev_err(priv->device, "Invalid DMA configuration\n");
2057                 return -EINVAL;
2058         }
2059
2060         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2061                 atds = 1;
2062
2063         ret = priv->hw->dma->reset(priv->ioaddr);
2064         if (ret) {
2065                 dev_err(priv->device, "Failed to reset the dma\n");
2066                 return ret;
2067         }
2068
2069         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2070                 /* DMA Configuration */
2071                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2072                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2073
2074                 /* DMA RX Channel Configuration */
2075                 for (chan = 0; chan < rx_channels_count; chan++) {
2076                         rx_q = &priv->rx_queue[chan];
2077
2078                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2079                                                     priv->plat->dma_cfg,
2080                                                     rx_q->dma_rx_phy, chan);
2081
2082                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2083                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2084                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2085                                                        rx_q->rx_tail_addr,
2086                                                        chan);
2087                 }
2088
2089                 /* DMA TX Channel Configuration */
2090                 for (chan = 0; chan < tx_channels_count; chan++) {
2091                         tx_q = &priv->tx_queue[chan];
2092
2093                         priv->hw->dma->init_chan(priv->ioaddr,
2094                                                  priv->plat->dma_cfg,
2095                                                  chan);
2096
2097                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2098                                                     priv->plat->dma_cfg,
2099                                                     tx_q->dma_tx_phy, chan);
2100
2101                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2102                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2103                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2104                                                        tx_q->tx_tail_addr,
2105                                                        chan);
2106                 }
2107         } else {
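                     /* chan is still 0 here: older cores use a single DMA channel */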
2108                 rx_q = &priv->rx_queue[chan];
2109                 tx_q = &priv->tx_queue[chan];
2110
2111                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2112                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2113         }
2114
2115         if (priv->plat->axi && priv->hw->dma->axi)
2116                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2117
2118         return ret;
2119 }
2120
2121 /**
2122  * stmmac_tx_timer - mitigation sw timer for tx.
2123  * @data: data pointer
2124  * Description:
2125  * This is the timer handler to directly invoke the stmmac_tx_clean.
2126  */
2127 static void stmmac_tx_timer(unsigned long data)
2128 {
2129         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2130         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2131         u32 queue;
2132
2133         /* let's scan all the tx queues */
2134         for (queue = 0; queue < tx_queues_count; queue++)
2135                 stmmac_tx_clean(priv, queue);
2136 }
2137
2138 /**
2139  * stmmac_stop_all_queues - Stop all queues
2140  * @priv: driver private structure
2141  */
2142 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
2143 {
2144         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2145         u32 queue;
2146
2147         for (queue = 0; queue < tx_queues_cnt; queue++)
2148                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2149 }
2150
2151 /**
2152  * stmmac_start_all_queues - Start all queues
2153  * @priv: driver private structure
2154  */
2155 static void stmmac_start_all_queues(struct stmmac_priv *priv)
2156 {
2157         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2158         u32 queue;
2159
2160         for (queue = 0; queue < tx_queues_cnt; queue++)
2161                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
2162 }
2163
2164 /**
2165  * stmmac_disable_all_queues - Disable all queues
2166  * @priv: driver private structure
2167  */
2168 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
2169 {
2170         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2171         u32 queue;
2172
2173         for (queue = 0; queue < rx_queues_cnt; queue++) {
2174                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2175
2176                 napi_disable(&rx_q->napi);
2177         }
2178 }
2179
2180 /**
2181  * stmmac_enable_all_queues - Enable all queues
2182  * @priv: driver private structure
2183  */
2184 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
2185 {
2186         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2187         u32 queue;
2188
2189         for (queue = 0; queue < rx_queues_cnt; queue++) {
2190                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2191
2192                 napi_enable(&rx_q->napi);
2193         }
2194 }
2195
2196 /**
2197  * stmmac_init_tx_coalesce - init tx mitigation options.
2198  * @priv: driver private structure
2199  * Description:
2200  * This inits the transmit coalesce parameters: i.e. timer rate,
2201  * timer handler and default threshold used for enabling the
2202  * interrupt on completion bit.
2203  */
2204 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2205 {
2206         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2207         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2208         init_timer(&priv->txtimer);
2209         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2210         priv->txtimer.data = (unsigned long)priv;
2211         priv->txtimer.function = stmmac_tx_timer;
2212         add_timer(&priv->txtimer);
2213 }
2214
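     /**
      * stmmac_set_rings_length - set the DMA RX/TX ring lengths
      * @priv: driver private structure
      * Description: it programs, per channel, the TX and RX descriptor ring
      * lengths into the DMA, when the corresponding callbacks are available.
      */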
2215 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2216 {
2217         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2218         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2219         u32 chan;
2220
2221         /* set TX ring length */
2222         if (priv->hw->dma->set_tx_ring_len) {
2223                 for (chan = 0; chan < tx_channels_count; chan++)
2224                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2225                                                        (DMA_TX_SIZE - 1), chan);
2226         }
2227
2228         /* set RX ring length */
2229         if (priv->hw->dma->set_rx_ring_len) {
2230                 for (chan = 0; chan < rx_channels_count; chan++)
2231                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2232                                                        (DMA_RX_SIZE - 1), chan);
2233         }
2234 }
2235
2236 /**
2237  *  stmmac_set_tx_queue_weight - Set TX queue weight
2238  *  @priv: driver private structure
2239  *  Description: It is used for setting the TX queue weights
2240  */
2241 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2242 {
2243         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2244         u32 weight;
2245         u32 queue;
2246
2247         for (queue = 0; queue < tx_queues_count; queue++) {
2248                 weight = priv->plat->tx_queues_cfg[queue].weight;
2249                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2250         }
2251 }
2252
2253 /**
2254  *  stmmac_configure_cbs - Configure CBS in TX queue
2255  *  @priv: driver private structure
2256  *  Description: It is used for configuring CBS in AVB TX queues
2257  */
2258 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2259 {
2260         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2261         u32 mode_to_use;
2262         u32 queue;
2263
2264         for (queue = 0; queue < tx_queues_count; queue++) {
2265                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2266                 if (mode_to_use == MTL_QUEUE_DCB)
2267                         continue;
2268
2269                 priv->hw->mac->config_cbs(priv->hw,
2270                                 priv->plat->tx_queues_cfg[queue].send_slope,
2271                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2272                                 priv->plat->tx_queues_cfg[queue].high_credit,
2273                                 priv->plat->tx_queues_cfg[queue].low_credit,
2274                                 queue);
2275         }
2276 }
2277
2278 /**
2279  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2280  *  @priv: driver private structure
2281  *  Description: It is used for mapping RX queues to RX dma channels
2282  */
2283 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2284 {
2285         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2286         u32 queue;
2287         u32 chan;
2288
2289         for (queue = 0; queue < rx_queues_count; queue++) {
2290                 chan = priv->plat->rx_queues_cfg[queue].chan;
2291                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2292         }
2293 }
2294
2295 /**
2296  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2297  *  @priv: driver private structure
2298  *  Description: It is used for configuring the RX Queue Priority
2299  */
2300 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2301 {
2302         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2303         u32 queue;
2304         u32 prio;
2305
2306         for (queue = 0; queue < rx_queues_count; queue++) {
2307                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2308                         continue;
2309
2310                 prio = priv->plat->rx_queues_cfg[queue].prio;
2311                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2312         }
2313 }
2314
2315 /**
2316  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2317  *  @priv: driver private structure
2318  *  Description: It is used for configuring the TX Queue Priority
2319  */
2320 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2321 {
2322         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2323         u32 queue;
2324         u32 prio;
2325
2326         for (queue = 0; queue < tx_queues_count; queue++) {
2327                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2328                         continue;
2329
2330                 prio = priv->plat->tx_queues_cfg[queue].prio;
2331                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2332         }
2333 }
2334
2335 /**
2336  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2337  *  @priv: driver private structure
2338  *  Description: It is used for configuring the RX queue routing
2339  */
2340 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2341 {
2342         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2343         u32 queue;
2344         u8 packet;
2345
2346         for (queue = 0; queue < rx_queues_count; queue++) {
2347                 /* no specific packet type routing specified for the queue */
2348                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2349                         continue;
2350
2351                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2352                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2353         }
2354 }
2355
2356 /**
2357  *  stmmac_mtl_configuration - Configure MTL
2358  *  @priv: driver private structure
2359  *  Description: It is used for configuring MTL
2360  */
2361 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2362 {
2363         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2364         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2365
2366         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2367                 stmmac_set_tx_queue_weight(priv);
2368
2369         /* Configure MTL RX algorithms */
2370         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2371                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2372                                                 priv->plat->rx_sched_algorithm);
2373
2374         /* Configure MTL TX algorithms */
2375         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2376                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2377                                                 priv->plat->tx_sched_algorithm);
2378
2379         /* Configure CBS in AVB TX queues */
2380         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2381                 stmmac_configure_cbs(priv);
2382
2383         /* Map RX MTL to DMA channels */
2384         if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
2385                 stmmac_rx_queue_dma_chan_map(priv);
2386
2387         /* Enable MAC RX Queues */
2388         if (priv->hw->mac->rx_queue_enable)
2389                 stmmac_mac_enable_rx_queues(priv);
2390
2391         /* Set the HW DMA mode and the COE */
2392         stmmac_dma_operation_mode(priv);
2393
2394         /* Set RX priorities */
2395         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2396                 stmmac_mac_config_rx_queues_prio(priv);
2397
2398         /* Set TX priorities */
2399         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2400                 stmmac_mac_config_tx_queues_prio(priv);
2401
2402         /* Set RX routing */
2403         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2404                 stmmac_mac_config_rx_queues_routing(priv);
2405 }
2406
2407 /**
2408  * stmmac_hw_setup - setup mac in a usable state.
2409  *  @dev : pointer to the device structure.
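      *  @init_ptp: if true, enable the PTP reference clock and initialize PTP support.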
2410  *  Description:
2411  *  this is the main function to setup the HW in a usable state: the
2412  *  dma engine is reset, the core registers are configured (e.g. AXI,
2413  *  Checksum features, timers) and the DMA is ready to start receiving
2414  *  and transmitting.
2415  *  Return value:
2416  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2417  *  file on failure.
2418  */
2419 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2420 {
2421         struct stmmac_priv *priv = netdev_priv(dev);
2422         u32 rx_cnt = priv->plat->rx_queues_to_use;
2423         u32 tx_cnt = priv->plat->tx_queues_to_use;
2424         u32 chan;
2425         int ret;
2426
2427         /* DMA initialization and SW reset */
2428         ret = stmmac_init_dma_engine(priv);
2429         if (ret < 0) {
2430                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2431                            __func__);
2432                 return ret;
2433         }
2434
2435         /* Copy the MAC addr into the HW  */
2436         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2437
2438         /* PS and related bits will be programmed according to the speed */
2439         if (priv->hw->pcs) {
2440                 int speed = priv->plat->mac_port_sel_speed;
2441
2442                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2443                     (speed == SPEED_1000)) {
2444                         priv->hw->ps = speed;
2445                 } else {
2446                         dev_warn(priv->device, "invalid port speed\n");
2447                         priv->hw->ps = 0;
2448                 }
2449         }
2450
2451         /* Initialize the MAC Core */
2452         priv->hw->mac->core_init(priv->hw, dev->mtu);
2453
2454         /* Initialize MTL */
2455         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2456                 stmmac_mtl_configuration(priv);
2457
2458         ret = priv->hw->mac->rx_ipc(priv->hw);
2459         if (!ret) {
2460                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2461                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2462                 priv->hw->rx_csum = 0;
2463         }
2464
2465         /* Enable the MAC Rx/Tx */
2466         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2467                 stmmac_dwmac4_set_mac(priv->ioaddr, true);
2468         else
2469                 stmmac_set_mac(priv->ioaddr, true);
2470
2471         stmmac_mmc_setup(priv);
2472
2473         if (init_ptp) {
2474                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2475                 if (ret < 0)
2476                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2477
2478                 ret = stmmac_init_ptp(priv);
2479                 if (ret == -EOPNOTSUPP)
2480                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2481                 else if (ret)
2482                         netdev_warn(priv->dev, "PTP init failed\n");
2483         }
2484
2485 #ifdef CONFIG_DEBUG_FS
2486         ret = stmmac_init_fs(dev);
2487         if (ret < 0)
2488                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2489                             __func__);
2490 #endif
2491         /* Start the ball rolling... */
2492         stmmac_start_all_dma(priv);
2493
2494         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2495
2496         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2497                 priv->rx_riwt = MAX_DMA_RIWT;
2498                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2499         }
2500
2501         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2502                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2503
2504         /* set TX and RX rings length */
2505         stmmac_set_rings_length(priv);
2506
2507         /* Enable TSO */
2508         if (priv->tso) {
2509                 for (chan = 0; chan < tx_cnt; chan++)
2510                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2511         }
2512
2513         return 0;
2514 }
2515
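     /**
      * stmmac_hw_teardown - release resources taken in stmmac_hw_setup
      * @dev: net device structure
      * Description: currently it only disables the PTP reference clock.
      */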
2516 static void stmmac_hw_teardown(struct net_device *dev)
2517 {
2518         struct stmmac_priv *priv = netdev_priv(dev);
2519
2520         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2521 }
2522
2523 /**
2524  *  stmmac_open - open entry point of the driver
2525  *  @dev : pointer to the device structure.
2526  *  Description:
2527  *  This function is the open entry point of the driver.
2528  *  Return value:
2529  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2530  *  file on failure.
2531  */
2532 static int stmmac_open(struct net_device *dev)
2533 {
2534         struct stmmac_priv *priv = netdev_priv(dev);
2535         int ret;
2536
2537         stmmac_check_ether_addr(priv);
2538
2539         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2540             priv->hw->pcs != STMMAC_PCS_TBI &&
2541             priv->hw->pcs != STMMAC_PCS_RTBI) {
2542                 ret = stmmac_init_phy(dev);
2543                 if (ret) {
2544                         netdev_err(priv->dev,
2545                                    "%s: Cannot attach to PHY (error: %d)\n",
2546                                    __func__, ret);
2547                         return ret;
2548                 }
2549         }
2550
2551         /* Extra statistics */
2552         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2553         priv->xstats.threshold = tc;
2554
2555         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2556
2557         ret = stmmac_hw_setup(dev, true);
2558         if (ret < 0) {
2559                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2560                 goto init_error;
2561         }
2562
2563         stmmac_init_tx_coalesce(priv);
2564
2565         if (dev->phydev)
2566                 phy_start(dev->phydev);
2567
2568         /* Request the IRQ lines */
2569         ret = request_irq(dev->irq, stmmac_interrupt,
2570                           IRQF_SHARED, dev->name, dev);
2571         if (unlikely(ret < 0)) {
2572                 netdev_err(priv->dev,
2573                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2574                            __func__, dev->irq, ret);
2575                 goto irq_error;
2576         }
2577
2578         /* Request the Wake IRQ in case another line is used for WoL */
2579         if (priv->wol_irq != dev->irq) {
2580                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2581                                   IRQF_SHARED, dev->name, dev);
2582                 if (unlikely(ret < 0)) {
2583                         netdev_err(priv->dev,
2584                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2585                                    __func__, priv->wol_irq, ret);
2586                         goto wolirq_error;
2587                 }
2588         }
2589
2590         /* Request the LPI IRQ in case a dedicated line is used for it */
2591         if (priv->lpi_irq > 0) {
2592                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2593                                   dev->name, dev);
2594                 if (unlikely(ret < 0)) {
2595                         netdev_err(priv->dev,
2596                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2597                                    __func__, priv->lpi_irq, ret);
2598                         goto lpiirq_error;
2599                 }
2600         }
2601
2602         stmmac_enable_all_queues(priv);
2603         stmmac_start_all_queues(priv);
2604
2605         return 0;
2606
2607 lpiirq_error:
2608         if (priv->wol_irq != dev->irq)
2609                 free_irq(priv->wol_irq, dev);
2610 wolirq_error:
2611         free_irq(dev->irq, dev);
2612 irq_error:
2613         if (dev->phydev)
2614                 phy_stop(dev->phydev);
2615
2616         del_timer_sync(&priv->txtimer);
2617         stmmac_hw_teardown(dev);
2618 init_error:
2619         free_dma_desc_resources(priv);
2620
2621         if (dev->phydev)
2622                 phy_disconnect(dev->phydev);
2623
2624         return ret;
2625 }
2626
2627 /**
2628  *  stmmac_release - close entry point of the driver
2629  *  @dev : device pointer.
2630  *  Description:
2631  *  This is the stop entry point of the driver.
2632  */
2633 static int stmmac_release(struct net_device *dev)
2634 {
2635         struct stmmac_priv *priv = netdev_priv(dev);
2636
2637         if (priv->eee_enabled)
2638                 del_timer_sync(&priv->eee_ctrl_timer);
2639
2640         /* Stop and disconnect the PHY */
2641         if (dev->phydev) {
2642                 phy_stop(dev->phydev);
2643                 phy_disconnect(dev->phydev);
2644         }
2645
2646         stmmac_stop_all_queues(priv);
2647
2648         stmmac_disable_all_queues(priv);
2649
2650         del_timer_sync(&priv->txtimer);
2651
2652         /* Free the IRQ lines */
2653         free_irq(dev->irq, dev);
2654         if (priv->wol_irq != dev->irq)
2655                 free_irq(priv->wol_irq, dev);
2656         if (priv->lpi_irq > 0)
2657                 free_irq(priv->lpi_irq, dev);
2658
2659         /* Stop TX/RX DMA and clear the descriptors */
2660         stmmac_stop_all_dma(priv);
2661
2662         /* Release and free the Rx/Tx resources */
2663         free_dma_desc_resources(priv);
2664
2665         /* Disable the MAC Rx/Tx */
2666         stmmac_set_mac(priv->ioaddr, false);
2667
2668         netif_carrier_off(dev);
2669
2670 #ifdef CONFIG_DEBUG_FS
2671         stmmac_exit_fs(dev);
2672 #endif
2673
2674         stmmac_release_ptp(priv);
2675
2676         return 0;
2677 }
2678
2679 /**
2680  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
2681  *  @priv: driver private structure
2682  *  @des: buffer start address
2683  *  @total_len: total length to fill in descriptors
2684  *  @last_segment: condition for the last descriptor
2685  *  @queue: TX queue index
2686  *  Description:
2687  *  This function fills the descriptors and requests new descriptors
2688  *  according to the buffer length to fill.
2689  */
2690 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2691                                  int total_len, bool last_segment, u32 queue)
2692 {
2693         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2694         struct dma_desc *desc;
2695         u32 buff_size;
2696         int tmp_len;
2697
2698         tmp_len = total_len;
2699
2700         while (tmp_len > 0) {
2701                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2702                 desc = tx_q->dma_tx + tx_q->cur_tx;
2703
2704                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2705                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2706                             TSO_MAX_BUFF_SIZE : tmp_len;
2707
2708                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2709                         0, 1,
2710                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2711                         0, 0);
2712
2713                 tmp_len -= TSO_MAX_BUFF_SIZE;
2714         }
2715 }
2716
2717 /**
2718  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2719  *  @skb : the socket buffer
2720  *  @dev : device pointer
2721  *  Description: this is the transmit function that is called on TSO frames
2722  *  (support available on GMAC4 and newer chips).
2723  *  The diagram below shows the ring programming in case of TSO frames:
2724  *
2725  *  First Descriptor
2726  *   --------
2727  *   | DES0 |---> buffer1 = L2/L3/L4 header
2728  *   | DES1 |---> TCP Payload (can continue on next descr...)
2729  *   | DES2 |---> buffer 1 and 2 len
2730  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2731  *   --------
2732  *      |
2733  *     ...
2734  *      |
2735  *   --------
2736  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2737  *   | DES1 | --|
2738  *   | DES2 | --> buffer 1 and 2 len
2739  *   | DES3 |
2740  *   --------
2741  *
2742  * The MSS is fixed when TSO is enabled, so the TDES3 ctx field does not need to be reprogrammed for each frame.
2743  */
2744 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2745 {
2746         struct dma_desc *desc, *first, *mss_desc = NULL;
2747         struct stmmac_priv *priv = netdev_priv(dev);
2748         u32 queue = skb_get_queue_mapping(skb);
2749         int nfrags = skb_shinfo(skb)->nr_frags;
2750         unsigned int first_entry, des;
2751         struct stmmac_tx_queue *tx_q;
2752         int tmp_pay_len = 0;
2753         u32 pay_len, mss;
2754         u8 proto_hdr_len;
2755         int i;
2756
2757         tx_q = &priv->tx_queue[queue];
2758
2759         /* Compute header lengths */
2760         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2761
2762         /* Desc availability based on the threshold should be safe enough */
2763         if (unlikely(stmmac_tx_avail(priv, queue) <
2764                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2765                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2766                         netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2767                         /* This is a hard error, log it. */
2768                         netdev_err(priv->dev,
2769                                    "%s: Tx Ring full when queue awake\n",
2770                                    __func__);
2771                 }
2772                 return NETDEV_TX_BUSY;
2773         }
2774
2775         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2776
2777         mss = skb_shinfo(skb)->gso_size;
2778
2779         /* set new MSS value if needed */
2780         if (mss != priv->mss) {
2781                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2782                 priv->hw->desc->set_mss(mss_desc, mss);
2783                 priv->mss = mss;
2784                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2785         }
2786
2787         if (netif_msg_tx_queued(priv)) {
2788                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2789                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2790                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2791                         skb->data_len);
2792         }
2793
2794         first_entry = tx_q->cur_tx;
2795
2796         desc = tx_q->dma_tx + first_entry;
2797         first = desc;
2798
2799         /* first descriptor: fill Headers on Buf1 */
2800         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2801                              DMA_TO_DEVICE);
2802         if (dma_mapping_error(priv->device, des))
2803                 goto dma_map_err;
2804
2805         tx_q->tx_skbuff_dma[first_entry].buf = des;
2806         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2807         tx_q->tx_skbuff[first_entry] = skb;
2808
2809         first->des0 = cpu_to_le32(des);
2810
2811         /* Fill start of payload in buff2 of first descriptor */
2812         if (pay_len)
2813                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2814
2815         /* If needed take extra descriptors to fill the remaining payload */
2816         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2817
2818         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2819
2820         /* Prepare fragments */
2821         for (i = 0; i < nfrags; i++) {
2822                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2823
2824                 des = skb_frag_dma_map(priv->device, frag, 0,
2825                                        skb_frag_size(frag),
2826                                        DMA_TO_DEVICE);
2827                 if (dma_mapping_error(priv->device, des))
2828                         goto dma_map_err;
2829
2830                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2831                                      (i == nfrags - 1), queue);
2832
2833                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2834                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2835                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2836                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2837         }
2838
2839         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2840
2841         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2842
2843         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2844                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2845                           __func__);
2846                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2847         }
2848
2849         dev->stats.tx_bytes += skb->len;
2850         priv->xstats.tx_tso_frames++;
2851         priv->xstats.tx_tso_nfrags += nfrags;
2852
2853         /* Manage tx mitigation */
2854         priv->tx_count_frames += nfrags + 1;
2855         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2856                 mod_timer(&priv->txtimer,
2857                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2858         } else {
2859                 priv->tx_count_frames = 0;
2860                 priv->hw->desc->set_tx_ic(desc);
2861                 priv->xstats.tx_set_ic_bit++;
2862         }
2863
2864         if (!priv->hwts_tx_en)
2865                 skb_tx_timestamp(skb);
2866
2867         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2868                      priv->hwts_tx_en)) {
2869                 /* declare that device is doing timestamping */
2870                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2871                 priv->hw->desc->enable_tx_timestamp(first);
2872         }
2873
2874         /* Complete the first descriptor before granting the DMA */
2875         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2876                         proto_hdr_len,
2877                         pay_len,
2878                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2879                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2880
2881         /* If context desc is used to change MSS */
2882         if (mss_desc)
2883                 priv->hw->desc->set_tx_owner(mss_desc);
2884
2885         /* The own bit must be the last setting done when preparing the
2886          * descriptor; a barrier is then needed to make sure that
2887          * everything is coherent before granting control to the DMA engine.
2888          */
2889         dma_wmb();
2890
2891         if (netif_msg_pktdata(priv)) {
2892                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2893                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2894                         tx_q->cur_tx, first, nfrags);
2895
2896                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2897                                              0);
2898
2899                 pr_info(">>> frame to be transmitted: ");
2900                 print_pkt(skb->data, skb_headlen(skb));
2901         }
2902
2903         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2904
2905         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2906                                        queue);
2907
2908         return NETDEV_TX_OK;
2909
2910 dma_map_err:
2911         dev_err(priv->device, "Tx dma map failed\n");
2912         dev_kfree_skb(skb);
2913         priv->dev->stats.tx_dropped++;
2914         return NETDEV_TX_OK;
2915 }
2916
2917 /**
2918  *  stmmac_xmit - Tx entry point of the driver
2919  *  @skb : the socket buffer
2920  *  @dev : device pointer
2921  *  Description : this is the tx entry point of the driver.
2922  *  It programs the chain or the ring and supports oversized frames
2923  *  and the SG feature.
2924  */
2925 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2926 {
2927         struct stmmac_priv *priv = netdev_priv(dev);
2928         unsigned int nopaged_len = skb_headlen(skb);
2929         int i, csum_insertion = 0, is_jumbo = 0;
2930         u32 queue = skb_get_queue_mapping(skb);
2931         int nfrags = skb_shinfo(skb)->nr_frags;
2932         unsigned int entry, first_entry;
2933         struct dma_desc *desc, *first;
2934         struct stmmac_tx_queue *tx_q;
2935         unsigned int enh_desc;
2936         unsigned int des;
2937
2938         tx_q = &priv->tx_queue[queue];
2939
2940         /* Manage oversized TCP frames for GMAC4 device */
2941         if (skb_is_gso(skb) && priv->tso) {
2942                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2943                         return stmmac_tso_xmit(skb, dev);
2944         }
2945
2946         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2947                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2948                         netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
2949                         /* This is a hard error, log it. */
2950                         netdev_err(priv->dev,
2951                                    "%s: Tx Ring full when queue awake\n",
2952                                    __func__);
2953                 }
2954                 return NETDEV_TX_BUSY;
2955         }
2956
2957         if (priv->tx_path_in_lpi_mode)
2958                 stmmac_disable_eee_mode(priv);
2959
2960         entry = tx_q->cur_tx;
2961         first_entry = entry;
2962
2963         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2964
2965         if (likely(priv->extend_desc))
2966                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2967         else
2968                 desc = tx_q->dma_tx + entry;
2969
2970         first = desc;
2971
2972         tx_q->tx_skbuff[first_entry] = skb;
2973
2974         enh_desc = priv->plat->enh_desc;
2975         /* To program the descriptors according to the size of the frame */
2976         if (enh_desc)
2977                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2978
2979         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2980                                          DWMAC_CORE_4_00)) {
2981                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
2982                 if (unlikely(entry < 0))
2983                         goto dma_map_err;
2984         }
2985
2986         for (i = 0; i < nfrags; i++) {
2987                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2988                 int len = skb_frag_size(frag);
2989                 bool last_segment = (i == (nfrags - 1));
2990
2991                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2992
2993                 if (likely(priv->extend_desc))
2994                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2995                 else
2996                         desc = tx_q->dma_tx + entry;
2997
2998                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2999                                        DMA_TO_DEVICE);
3000                 if (dma_mapping_error(priv->device, des))
3001                         goto dma_map_err; /* should reuse desc w/o issues */
3002
3003                 tx_q->tx_skbuff[entry] = NULL;
3004
3005                 tx_q->tx_skbuff_dma[entry].buf = des;
3006                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3007                         desc->des0 = cpu_to_le32(des);
3008                 else
3009                         desc->des2 = cpu_to_le32(des);
3010
3011                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3012                 tx_q->tx_skbuff_dma[entry].len = len;
3013                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3014
3015                 /* Prepare the descriptor and set the own bit too */
3016                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3017                                                 priv->mode, 1, last_segment);
3018         }
3019
3020         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3021
3022         tx_q->cur_tx = entry;
3023
3024         if (netif_msg_pktdata(priv)) {
3025                 void *tx_head;
3026
3027                 netdev_dbg(priv->dev,
3028                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3029                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3030                            entry, first, nfrags);
3031
3032                 if (priv->extend_desc)
3033                         tx_head = (void *)tx_q->dma_etx;
3034                 else
3035                         tx_head = (void *)tx_q->dma_tx;
3036
3037                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3038
3039                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3040                 print_pkt(skb->data, skb->len);
3041         }
3042
3043         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3044                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3045                           __func__);
3046                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
3047         }
3048
3049         dev->stats.tx_bytes += skb->len;
3050
3051         /* According to the coalesce parameter the IC bit for the latest
3052          * segment is reset and the timer re-started to clean the tx status.
3053          * This approach takes care of the fragments: desc is the first
3054          * element in case of no SG.
3055          */
3056         priv->tx_count_frames += nfrags + 1;
3057         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3058                 mod_timer(&priv->txtimer,
3059                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3060         } else {
3061                 priv->tx_count_frames = 0;
3062                 priv->hw->desc->set_tx_ic(desc);
3063                 priv->xstats.tx_set_ic_bit++;
3064         }
3065
3066         if (!priv->hwts_tx_en)
3067                 skb_tx_timestamp(skb);
3068
3069         /* Ready to fill the first descriptor and set the OWN bit w/o any
3070          * problems because all the descriptors are actually ready to be
3071          * passed to the DMA engine.
3072          */
3073         if (likely(!is_jumbo)) {
3074                 bool last_segment = (nfrags == 0);
3075
3076                 des = dma_map_single(priv->device, skb->data,
3077                                      nopaged_len, DMA_TO_DEVICE);
3078                 if (dma_mapping_error(priv->device, des))
3079                         goto dma_map_err;
3080
3081                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3082                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3083                         first->des0 = cpu_to_le32(des);
3084                 else
3085                         first->des2 = cpu_to_le32(des);
3086
3087                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3088                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3089
3090                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3091                              priv->hwts_tx_en)) {
3092                         /* declare that device is doing timestamping */
3093                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3094                         priv->hw->desc->enable_tx_timestamp(first);
3095                 }
3096
3097                 /* Prepare the first descriptor setting the OWN bit too */
3098                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3099                                                 csum_insertion, priv->mode, 1,
3100                                                 last_segment);
3101
3102                 /* The own bit must be the last setting done when preparing the
3103                  * descriptor; a barrier is then needed to make sure that
3104                  * everything is coherent before granting control to the DMA engine.
3105                  */
3106                 dma_wmb();
3107         }
3108
3109         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3110
3111         if (priv->synopsys_id < DWMAC_CORE_4_00)
3112                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3113         else
3114                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3115                                                queue);
3116
3117         return NETDEV_TX_OK;
3118
3119 dma_map_err:
3120         netdev_err(priv->dev, "Tx DMA map failed\n");
3121         dev_kfree_skb(skb);
3122         priv->dev->stats.tx_dropped++;
3123         return NETDEV_TX_OK;
3124 }
3125
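/**
 *  stmmac_rx_vlan - strip the VLAN tag from a received frame
 *  @dev: device pointer
 *  @skb: the received socket buffer
 *  Description: when HW VLAN RX offload is enabled and the frame carries
 *  an 802.1Q tag, pop the tag from the packet data and record it with
 *  __vlan_hwaccel_put_tag() so the stack sees an untagged frame.
 */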
3126 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3127 {
3128         struct ethhdr *ehdr;
3129         u16 vlanid;
3130
3131         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3132             NETIF_F_HW_VLAN_CTAG_RX &&
3133             !__vlan_get_tag(skb, &vlanid)) {
3134                 /* pop the vlan tag */
3135                 ehdr = (struct ethhdr *)skb->data;
3136                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3137                 skb_pull(skb, VLAN_HLEN);
3138                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3139         }
3140 }
3141
3142
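/**
 *  stmmac_rx_threshold_count - check the zero-copy fallback threshold
 *  @rx_q: RX queue structure
 *  Description: returns 1 once rx_zeroc_thresh has reached STMMAC_RX_THRESH,
 *  telling the RX path to copy incoming frames instead of consuming the
 *  preallocated (zero-copy) buffers.
 */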
3143 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3144 {
3145         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3146                 return 0;
3147
3148         return 1;
3149 }
3150
3151 /**
3152  * stmmac_rx_refill - refill used skb preallocated buffers
3153  * @priv: driver private structure
3154  * @queue: RX queue index
3155  * Description : this is to reallocate the skb for the reception process
3156  * that is based on zero-copy.
3157  */
3158 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3159 {
3160         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3161         int dirty = stmmac_rx_dirty(priv, queue);
3162         unsigned int entry = rx_q->dirty_rx;
3163         int bfsize = priv->dma_buf_sz;
3164
3165         while (dirty-- > 0) {
3166                 struct dma_desc *p;
3167
3168                 if (priv->extend_desc)
3169                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3170                 else
3171                         p = rx_q->dma_rx + entry;
3172
3173                 if (!rx_q->rx_skbuff[entry]) {
3174                         struct sk_buff *skb;
3175
3176                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3177                         if (unlikely(!skb)) {
3178                                 /* so for a while no zero-copy! */
3179                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3180                                 if (unlikely(net_ratelimit()))
3181                                         dev_err(priv->device,
3182                                                 "fail to alloc skb entry %d\n",
3183                                                 entry);
3184                                 break;
3185                         }
3186
3187                         rx_q->rx_skbuff[entry] = skb;
3188                         rx_q->rx_skbuff_dma[entry] =
3189                             dma_map_single(priv->device, skb->data, bfsize,
3190                                            DMA_FROM_DEVICE);
3191                         if (dma_mapping_error(priv->device,
3192                                               rx_q->rx_skbuff_dma[entry])) {
3193                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3194                                 dev_kfree_skb(skb);
3195                                 break;
3196                         }
3197
3198                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3199                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3200                                 p->des1 = 0;
3201                         } else {
3202                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3203                         }
3204                         if (priv->hw->mode->refill_desc3)
3205                                 priv->hw->mode->refill_desc3(rx_q, p);
3206
3207                         if (rx_q->rx_zeroc_thresh > 0)
3208                                 rx_q->rx_zeroc_thresh--;
3209
3210                         netif_dbg(priv, rx_status, priv->dev,
3211                                   "refill entry #%d\n", entry);
3212                 }
3213                 dma_wmb();
3214
3215                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3216                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3217                 else
3218                         priv->hw->desc->set_rx_owner(p);
3219
3220                 dma_wmb();
3221
3222                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3223         }
3224         rx_q->dirty_rx = entry;
3225 }
3226
3227 /**
3228  * stmmac_rx - manage the receive process
3229  * @priv: driver private structure
3230  * @limit: napi budget.
      * @queue: RX queue index.
3231  * Description : this is the function called by the napi poll method.
3232  * It gets all the frames inside the ring.
3233  */
3234 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3235 {
3236         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3237         unsigned int entry = rx_q->cur_rx;
3238         int coe = priv->hw->rx_csum;
3239         unsigned int next_entry;
3240         unsigned int count = 0;
3241
3242         if (netif_msg_rx_status(priv)) {
3243                 void *rx_head;
3244
3245                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3246                 if (priv->extend_desc)
3247                         rx_head = (void *)rx_q->dma_erx;
3248                 else
3249                         rx_head = (void *)rx_q->dma_rx;
3250
3251                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3252         }
3253         while (count < limit) {
3254                 int status;
3255                 struct dma_desc *p;
3256                 struct dma_desc *np;
3257
3258                 if (priv->extend_desc)
3259                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3260                 else
3261                         p = rx_q->dma_rx + entry;
3262
3263                 /* read the status of the incoming frame */
3264                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3265                                                    &priv->xstats, p);
3266                 /* check if managed by the DMA otherwise go ahead */
3267                 if (unlikely(status & dma_own))
3268                         break;
3269
3270                 count++;
3271
3272                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3273                 next_entry = rx_q->cur_rx;
3274
3275                 if (priv->extend_desc)
3276                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3277                 else
3278                         np = rx_q->dma_rx + next_entry;
3279
3280                 prefetch(np);
3281
3282                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3283                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3284                                                            &priv->xstats,
3285                                                            rx_q->dma_erx +
3286                                                            entry);
3287                 if (unlikely(status == discard_frame)) {
3288                         priv->dev->stats.rx_errors++;
3289                         if (priv->hwts_rx_en && !priv->extend_desc) {
3290                                 /* DESC2 & DESC3 will be overwritten by device
3291                                  * with timestamp value, hence reinitialize
3292                                  * them in stmmac_rx_refill() function so that
3293                                  * device can reuse it.
3294                                  */
3295                                 rx_q->rx_skbuff[entry] = NULL;
3296                                 dma_unmap_single(priv->device,
3297                                                  rx_q->rx_skbuff_dma[entry],
3298                                                  priv->dma_buf_sz,
3299                                                  DMA_FROM_DEVICE);
3300                         }
3301                 } else {
3302                         struct sk_buff *skb;
3303                         int frame_len;
3304                         unsigned int des;
3305
3306                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3307                                 des = le32_to_cpu(p->des0);
3308                         else
3309                                 des = le32_to_cpu(p->des2);
3310
3311                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3312
3313                         /*  If frame length is greater than skb buffer size
3314                          *  (preallocated during init) then the packet is
3315                          *  ignored
3316                          */
3317                         if (frame_len > priv->dma_buf_sz) {
3318                                 netdev_err(priv->dev,
3319                                            "len %d larger than size (%d)\n",
3320                                            frame_len, priv->dma_buf_sz);
3321                                 priv->dev->stats.rx_length_errors++;
3322                                 break;
3323                         }
3324
3325                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3326                          * Type frames (LLC/LLC-SNAP)
3327                          */
3328                         if (unlikely(status != llc_snap))
3329                                 frame_len -= ETH_FCS_LEN;
3330
3331                         if (netif_msg_rx_status(priv)) {
3332                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3333                                            p, entry, des);
3334                                 if (frame_len > ETH_FRAME_LEN)
3335                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3336                                                    frame_len, status);
3337                         }
3338
3339                         /* Zero-copy is always used for all sizes
3340                          * in case of GMAC4 because the used
3341                          * descriptors always need to be refilled.
3342                          */
3343                         if (unlikely(!priv->plat->has_gmac4 &&
3344                                      ((frame_len < priv->rx_copybreak) ||
3345                                      stmmac_rx_threshold_count(rx_q)))) {
3346                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3347                                                                 frame_len);
3348                                 if (unlikely(!skb)) {
3349                                         if (net_ratelimit())
3350                                                 dev_warn(priv->device,
3351                                                          "packet dropped\n");
3352                                         priv->dev->stats.rx_dropped++;
3353                                         break;
3354                                 }
3355
3356                                 dma_sync_single_for_cpu(priv->device,
3357                                                         rx_q->rx_skbuff_dma
3358                                                         [entry], frame_len,
3359                                                         DMA_FROM_DEVICE);
3360                                 skb_copy_to_linear_data(skb,
3361                                                         rx_q->
3362                                                         rx_skbuff[entry]->data,
3363                                                         frame_len);
3364
3365                                 skb_put(skb, frame_len);
3366                                 dma_sync_single_for_device(priv->device,
3367                                                            rx_q->rx_skbuff_dma
3368                                                            [entry], frame_len,
3369                                                            DMA_FROM_DEVICE);
3370                         } else {
3371                                 skb = rx_q->rx_skbuff[entry];
3372                                 if (unlikely(!skb)) {
3373                                         netdev_err(priv->dev,
3374                                                    "%s: Inconsistent Rx chain\n",
3375                                                    priv->dev->name);
3376                                         priv->dev->stats.rx_dropped++;
3377                                         break;
3378                                 }
3379                                 prefetch(skb->data - NET_IP_ALIGN);
3380                                 rx_q->rx_skbuff[entry] = NULL;
3381                                 rx_q->rx_zeroc_thresh++;
3382
3383                                 skb_put(skb, frame_len);
3384                                 dma_unmap_single(priv->device,
3385                                                  rx_q->rx_skbuff_dma[entry],
3386                                                  priv->dma_buf_sz,
3387                                                  DMA_FROM_DEVICE);
3388                         }
3389
3390                         if (netif_msg_pktdata(priv)) {
3391                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3392                                            frame_len);
3393                                 print_pkt(skb->data, frame_len);
3394                         }
3395
3396                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3397
3398                         stmmac_rx_vlan(priv->dev, skb);
3399
3400                         skb->protocol = eth_type_trans(skb, priv->dev);
3401
3402                         if (unlikely(!coe))
3403                                 skb_checksum_none_assert(skb);
3404                         else
3405                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3406
3407                         napi_gro_receive(&rx_q->napi, skb);
3408
3409                         priv->dev->stats.rx_packets++;
3410                         priv->dev->stats.rx_bytes += frame_len;
3411                 }
3412                 entry = next_entry;
3413         }
3414
3415         stmmac_rx_refill(priv, queue);
3416
3417         priv->xstats.rx_pkt_n += count;
3418
3419         return count;
3420 }
3421
3422 /**
3423  *  stmmac_poll - stmmac poll method (NAPI)
3424  *  @napi : pointer to the napi structure.
3425  *  @budget : maximum number of packets that the current CPU can receive from
3426  *            all interfaces.
3427  *  Description :
3428  *  To look at the incoming frames and clear the tx resources.
3429  */
3430 static int stmmac_poll(struct napi_struct *napi, int budget)
3431 {
3432         struct stmmac_rx_queue *rx_q =
3433                 container_of(napi, struct stmmac_rx_queue, napi);
3434         struct stmmac_priv *priv = rx_q->priv_data;
3435         u32 tx_count = priv->dma_cap.number_tx_queues;
3436         u32 chan = rx_q->queue_index;
3437         u32 work_done = 0;
3438         u32 queue = 0;
3439
3440         priv->xstats.napi_poll++;
3441         /* check all the queues */
3442         for (queue = 0; queue < tx_count; queue++)
3443                 stmmac_tx_clean(priv, queue);
3444
3445         /* Process RX packets from this queue */
3446         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3447
3448         if (work_done < budget) {
3449                 napi_complete_done(napi, work_done);
3450                 stmmac_enable_dma_irq(priv, chan);
3451         }
3452         return work_done;
3453 }
3454
3455 /**
3456  *  stmmac_tx_timeout
3457  *  @dev : Pointer to net device structure
3458  *  Description: this function is called when a packet transmission fails to
3459  *   complete within a reasonable time. The driver will mark the error in the
3460  *   netdev structure and arrange for the device to be reset to a sane state
3461  *   in order to transmit a new packet.
3462  */
3463 static void stmmac_tx_timeout(struct net_device *dev)
3464 {
3465         struct stmmac_priv *priv = netdev_priv(dev);
3466         u32 tx_count = priv->plat->tx_queues_to_use;
3467         u32 chan;
3468
3469         /* Clear Tx resources and restart transmitting again */
3470         for (chan = 0; chan < tx_count; chan++)
3471                 stmmac_tx_err(priv, chan);
3472 }
3473
3474 /**
3475  *  stmmac_set_rx_mode - entry point for multicast addressing
3476  *  @dev : pointer to the device structure
3477  *  Description:
3478  *  This function is a driver entry point which gets called by the kernel
3479  *  whenever multicast addresses must be enabled/disabled.
3480  *  Return value:
3481  *  void.
3482  */
3483 static void stmmac_set_rx_mode(struct net_device *dev)
3484 {
3485         struct stmmac_priv *priv = netdev_priv(dev);
3486
3487         priv->hw->mac->set_filter(priv->hw, dev);
3488 }
3489
3490 /**
3491  *  stmmac_change_mtu - entry point to change MTU size for the device.
3492  *  @dev : device pointer.
3493  *  @new_mtu : the new MTU size for the device.
3494  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3495  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3496  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3497  *  Return value:
3498  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3499  *  file on failure.
3500  */
3501 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3502 {
3503         struct stmmac_priv *priv = netdev_priv(dev);
3504
3505         if (netif_running(dev)) {
3506                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3507                 return -EBUSY;
3508         }
3509
3510         dev->mtu = new_mtu;
3511
3512         netdev_update_features(dev);
3513
3514         return 0;
3515 }
3516
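/**
 *  stmmac_fix_features - adjust the netdev feature set to the HW limits
 *  @dev: device pointer
 *  @features: features requested by the stack
 *  Description: drop the checksum offload flags that the core or the
 *  platform configuration cannot support and track the TSO state
 *  requested via ethtool.
 */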
3517 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3518                                              netdev_features_t features)
3519 {
3520         struct stmmac_priv *priv = netdev_priv(dev);
3521
3522         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3523                 features &= ~NETIF_F_RXCSUM;
3524
3525         if (!priv->plat->tx_coe)
3526                 features &= ~NETIF_F_CSUM_MASK;
3527
3528         /* Some GMAC devices have buggy Jumbo frame support that
3529          * requires the Tx COE to be disabled for oversized frames
3530          * (due to limited buffer sizes). In this case we disable
3531          * the TX csum insertion in the TDES and do not use SF.
3532          */
3533         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3534                 features &= ~NETIF_F_CSUM_MASK;
3535
3536         /* Enable or disable TSO as requested via ethtool */
3537         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3538                 if (features & NETIF_F_TSO)
3539                         priv->tso = true;
3540                 else
3541                         priv->tso = false;
3542         }
3543
3544         return features;
3545 }
3546
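/**
 *  stmmac_set_features - apply the accepted feature set to the HW
 *  @netdev: device pointer
 *  @features: features accepted by the stack
 *  Description: program the RX checksum offload engine according to the
 *  NETIF_F_RXCSUM flag and the COE type provided by the platform.
 */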
3547 static int stmmac_set_features(struct net_device *netdev,
3548                                netdev_features_t features)
3549 {
3550         struct stmmac_priv *priv = netdev_priv(netdev);
3551
3552         /* Keep the COE Type if RX checksum offload is supported */
3553         if (features & NETIF_F_RXCSUM)
3554                 priv->hw->rx_csum = priv->plat->rx_coe;
3555         else
3556                 priv->hw->rx_csum = 0;
3557         /* No check needed because rx_coe has been set before and it will be
3558          * fixed in case of issue.
3559          */
3560         priv->hw->mac->rx_ipc(priv->hw);
3561
3562         return 0;
3563 }
3564
3565 /**
3566  *  stmmac_interrupt - main ISR
3567  *  @irq: interrupt number.
3568  *  @dev_id: to pass the net device pointer.
3569  *  Description: this is the main driver interrupt service routine.
3570  *  It can call:
3571  *  o DMA service routine (to manage incoming frame reception and transmission
3572  *    status)
3573  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3574  *    interrupts.
3575  */
3576 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3577 {
3578         struct net_device *dev = (struct net_device *)dev_id;
3579         struct stmmac_priv *priv = netdev_priv(dev);
3580         u32 rx_cnt = priv->plat->rx_queues_to_use;
3581         u32 tx_cnt = priv->plat->tx_queues_to_use;
3582         u32 queues_count;
3583         u32 queue;
3584
3585         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3586
3587         if (priv->irq_wake)
3588                 pm_wakeup_event(priv->device, 0);
3589
3590         if (unlikely(!dev)) {
3591                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3592                 return IRQ_NONE;
3593         }
3594
3595         /* To handle GMAC own interrupts */
3596         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3597                 int status = priv->hw->mac->host_irq_status(priv->hw,
3598                                                             &priv->xstats);
3599
3600                 if (unlikely(status)) {
3601                         /* For LPI we need to save the tx status */
3602                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3603                                 priv->tx_path_in_lpi_mode = true;
3604                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3605                                 priv->tx_path_in_lpi_mode = false;
3606                 }
3607
3608                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3609                         for (queue = 0; queue < queues_count; queue++) {
3610                                 struct stmmac_rx_queue *rx_q =
3611                                 &priv->rx_queue[queue];
3612
3613                                 status |=
3614                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3615                                                                    queue);
3616
3617                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3618                                     priv->hw->dma->set_rx_tail_ptr)
3619                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3620                                                                 rx_q->rx_tail_addr,
3621                                                                 queue);
3622                         }
3623                 }
3624
3625                 /* PCS link status */
3626                 if (priv->hw->pcs) {
3627                         if (priv->xstats.pcs_link)
3628                                 netif_carrier_on(dev);
3629                         else
3630                                 netif_carrier_off(dev);
3631                 }
3632         }
3633
3634         /* To handle DMA interrupts */
3635         stmmac_dma_interrupt(priv);
3636
3637         return IRQ_HANDLED;
3638 }
3639
3640 #ifdef CONFIG_NET_POLL_CONTROLLER
3641 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3642  * to allow network I/O with interrupts disabled.
3643  */
3644 static void stmmac_poll_controller(struct net_device *dev)
3645 {
3646         disable_irq(dev->irq);
3647         stmmac_interrupt(dev->irq, dev);
3648         enable_irq(dev->irq);
3649 }
3650 #endif
3651
3652 /**
3653  *  stmmac_ioctl - Entry point for the Ioctl
3654  *  @dev: Device pointer.
3655  *  @rq: An IOCTL specific structure, that can contain a pointer to
3656  *  a proprietary structure used to pass information to the driver.
3657  *  @cmd: IOCTL command
3658  *  Description:
3659  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3660  */
3661 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3662 {
3663         int ret = -EOPNOTSUPP;
3664
3665         if (!netif_running(dev))
3666                 return -EINVAL;
3667
3668         switch (cmd) {
3669         case SIOCGMIIPHY:
3670         case SIOCGMIIREG:
3671         case SIOCSMIIREG:
3672                 if (!dev->phydev)
3673                         return -EINVAL;
3674                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3675                 break;
3676         case SIOCSHWTSTAMP:
3677                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3678                 break;
3679         default:
3680                 break;
3681         }
3682
3683         return ret;
3684 }
3685
3686 #ifdef CONFIG_DEBUG_FS
3687 static struct dentry *stmmac_fs_dir;
3688
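/**
 * sysfs_display_ring - dump a descriptor ring to a debugfs seq_file
 * @head: pointer to the first descriptor of the ring
 * @size: number of descriptors in the ring
 * @extend_desc: non-zero when the ring uses extended descriptors
 * @seq: seq_file of the debugfs entry
 * Description: print des0..des3 of every descriptor, one entry per line.
 */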
3689 static void sysfs_display_ring(void *head, int size, int extend_desc,
3690                                struct seq_file *seq)
3691 {
3692         int i;
3693         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3694         struct dma_desc *p = (struct dma_desc *)head;
3695
3696         for (i = 0; i < size; i++) {
3697                 if (extend_desc) {
3698                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3699                                    i, (unsigned int)virt_to_phys(ep),
3700                                    le32_to_cpu(ep->basic.des0),
3701                                    le32_to_cpu(ep->basic.des1),
3702                                    le32_to_cpu(ep->basic.des2),
3703                                    le32_to_cpu(ep->basic.des3));
3704                         ep++;
3705                 } else {
3706                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3707                                    i, (unsigned int)virt_to_phys(ep),
3708                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3709                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3710                         p++;
3711                 }
3712                 seq_printf(seq, "\n");
3713         }
3714 }
3715
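/**
 * stmmac_sysfs_ring_read - debugfs show callback for descriptors_status
 * @seq: seq_file of the debugfs entry
 * @v: unused
 * Description: walk all RX and TX queues and dump their descriptor rings.
 */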
3716 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3717 {
3718         struct net_device *dev = seq->private;
3719         struct stmmac_priv *priv = netdev_priv(dev);
3720         u32 rx_count = priv->plat->rx_queues_to_use;
3721         u32 tx_count = priv->plat->tx_queues_to_use;
3722         u32 queue;
3723
3724         for (queue = 0; queue < rx_count; queue++) {
3725                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3726
3727                 seq_printf(seq, "RX Queue %d:\n", queue);
3728
3729                 if (priv->extend_desc) {
3730                         seq_printf(seq, "Extended descriptor ring:\n");
3731                         sysfs_display_ring((void *)rx_q->dma_erx,
3732                                            DMA_RX_SIZE, 1, seq);
3733                 } else {
3734                         seq_printf(seq, "Descriptor ring:\n");
3735                         sysfs_display_ring((void *)rx_q->dma_rx,
3736                                            DMA_RX_SIZE, 0, seq);
3737                 }
3738         }
3739
3740         for (queue = 0; queue < tx_count; queue++) {
3741                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3742
3743                 seq_printf(seq, "TX Queue %d:\n", queue);
3744
3745                 if (priv->extend_desc) {
3746                         seq_printf(seq, "Extended descriptor ring:\n");
3747                         sysfs_display_ring((void *)tx_q->dma_etx,
3748                                            DMA_TX_SIZE, 1, seq);
3749                 } else {
3750                         seq_printf(seq, "Descriptor ring:\n");
3751                         sysfs_display_ring((void *)tx_q->dma_tx,
3752                                            DMA_TX_SIZE, 0, seq);
3753                 }
3754         }
3755
3756         return 0;
3757 }
3758
3759 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3760 {
3761         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3762 }
3763
3764 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3765
3766 static const struct file_operations stmmac_rings_status_fops = {
3767         .owner = THIS_MODULE,
3768         .open = stmmac_sysfs_ring_open,
3769         .read = seq_read,
3770         .llseek = seq_lseek,
3771         .release = single_release,
3772 };
3773
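/**
 * stmmac_sysfs_dma_cap_read - debugfs show callback for dma_cap
 * @seq: seq_file of the debugfs entry
 * @v: unused
 * Description: report the features advertised by the DMA HW capability
 * register, or a short notice when the core does not expose it.
 */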
3774 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3775 {
3776         struct net_device *dev = seq->private;
3777         struct stmmac_priv *priv = netdev_priv(dev);
3778
3779         if (!priv->hw_cap_support) {
3780                 seq_printf(seq, "DMA HW features not supported\n");
3781                 return 0;
3782         }
3783
3784         seq_printf(seq, "==============================\n");
3785         seq_printf(seq, "\tDMA HW features\n");
3786         seq_printf(seq, "==============================\n");
3787
3788         seq_printf(seq, "\t10/100 Mbps: %s\n",
3789                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3790         seq_printf(seq, "\t1000 Mbps: %s\n",
3791                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3792         seq_printf(seq, "\tHalf duplex: %s\n",
3793                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3794         seq_printf(seq, "\tHash Filter: %s\n",
3795                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3796         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3797                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3798         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3799                    (priv->dma_cap.pcs) ? "Y" : "N");
3800         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3801                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3802         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3803                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3804         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3805                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3806         seq_printf(seq, "\tRMON module: %s\n",
3807                    (priv->dma_cap.rmon) ? "Y" : "N");
3808         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3809                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3810         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3811                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3812         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3813                    (priv->dma_cap.eee) ? "Y" : "N");
3814         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3815         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3816                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3817         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3818                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3819                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3820         } else {
3821                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3822                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3823                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3824                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3825         }
3826         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3827                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3828         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3829                    priv->dma_cap.number_rx_channel);
3830         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3831                    priv->dma_cap.number_tx_channel);
3832         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3833                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3834
3835         return 0;
3836 }
3837
3838 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3839 {
3840         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3841 }
3842
3843 static const struct file_operations stmmac_dma_cap_fops = {
3844         .owner = THIS_MODULE,
3845         .open = stmmac_sysfs_dma_cap_open,
3846         .read = seq_read,
3847         .llseek = seq_lseek,
3848         .release = single_release,
3849 };
3850
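/**
 * stmmac_init_fs - create the per-device debugfs entries
 * @dev: device pointer
 * Description: create the per-netdev directory under the stmmac debugfs
 * root with the descriptors_status and dma_cap files.
 * Return: 0 on success, -ENOMEM on failure.
 */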
3851 static int stmmac_init_fs(struct net_device *dev)
3852 {
3853         struct stmmac_priv *priv = netdev_priv(dev);
3854
3855         /* Create per netdev entries */
3856         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3857
3858         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3859                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3860
3861                 return -ENOMEM;
3862         }
3863
3864         /* Entry to report DMA RX/TX rings */
3865         priv->dbgfs_rings_status =
3866                 debugfs_create_file("descriptors_status", S_IRUGO,
3867                                     priv->dbgfs_dir, dev,
3868                                     &stmmac_rings_status_fops);
3869
3870         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3871                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3872                 debugfs_remove_recursive(priv->dbgfs_dir);
3873
3874                 return -ENOMEM;
3875         }
3876
3877         /* Entry to report the DMA HW features */
3878         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3879                                             priv->dbgfs_dir,
3880                                             dev, &stmmac_dma_cap_fops);
3881
3882         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3883                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3884                 debugfs_remove_recursive(priv->dbgfs_dir);
3885
3886                 return -ENOMEM;
3887         }
3888
3889         return 0;
3890 }
3891
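/**
 * stmmac_exit_fs - remove the per-device debugfs entries
 * @dev: device pointer
 */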
3892 static void stmmac_exit_fs(struct net_device *dev)
3893 {
3894         struct stmmac_priv *priv = netdev_priv(dev);
3895
3896         debugfs_remove_recursive(priv->dbgfs_dir);
3897 }
3898 #endif /* CONFIG_DEBUG_FS */
3899
3900 static const struct net_device_ops stmmac_netdev_ops = {
3901         .ndo_open = stmmac_open,
3902         .ndo_start_xmit = stmmac_xmit,
3903         .ndo_stop = stmmac_release,
3904         .ndo_change_mtu = stmmac_change_mtu,
3905         .ndo_fix_features = stmmac_fix_features,
3906         .ndo_set_features = stmmac_set_features,
3907         .ndo_set_rx_mode = stmmac_set_rx_mode,
3908         .ndo_tx_timeout = stmmac_tx_timeout,
3909         .ndo_do_ioctl = stmmac_ioctl,
3910 #ifdef CONFIG_NET_POLL_CONTROLLER
3911         .ndo_poll_controller = stmmac_poll_controller,
3912 #endif
3913         .ndo_set_mac_address = eth_mac_addr,
3914 };
3915
3916 /**
3917  *  stmmac_hw_init - Init the MAC device
3918  *  @priv: driver private structure
3919  *  Description: this function is to configure the MAC device according to
3920  *  some platform parameters or the HW capability register. It prepares the
3921  *  driver to use either ring or chain modes and to setup either enhanced or
3922  *  normal descriptors.
3923  */
3924 static int stmmac_hw_init(struct stmmac_priv *priv)
3925 {
3926         struct mac_device_info *mac;
3927
3928         /* Identify the MAC HW device */
3929         if (priv->plat->has_gmac) {
3930                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3931                 mac = dwmac1000_setup(priv->ioaddr,
3932                                       priv->plat->multicast_filter_bins,
3933                                       priv->plat->unicast_filter_entries,
3934                                       &priv->synopsys_id);
3935         } else if (priv->plat->has_gmac4) {
3936                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3937                 mac = dwmac4_setup(priv->ioaddr,
3938                                    priv->plat->multicast_filter_bins,
3939                                    priv->plat->unicast_filter_entries,
3940                                    &priv->synopsys_id);
3941         } else {
3942                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3943         }
3944         if (!mac)
3945                 return -ENOMEM;
3946
3947         priv->hw = mac;
3948
3949         /* To use the chained or ring mode */
3950         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3951                 priv->hw->mode = &dwmac4_ring_mode_ops;
3952         } else {
3953                 if (chain_mode) {
3954                         priv->hw->mode = &chain_mode_ops;
3955                         dev_info(priv->device, "Chain mode enabled\n");
3956                         priv->mode = STMMAC_CHAIN_MODE;
3957                 } else {
3958                         priv->hw->mode = &ring_mode_ops;
3959                         dev_info(priv->device, "Ring mode enabled\n");
3960                         priv->mode = STMMAC_RING_MODE;
3961                 }
3962         }
3963
3964         /* Get the HW capability (new GMACs, newer than 3.50a) */
3965         priv->hw_cap_support = stmmac_get_hw_features(priv);
3966         if (priv->hw_cap_support) {
3967                 dev_info(priv->device, "DMA HW capability register supported\n");
3968
3969                 /* We can override some gmac/dma configuration fields
3970                  * passed through the platform (e.g. enh_desc, tx_coe)
3971                  * with the values from the HW capability register
3972                  * (if supported).
3973                  */
3974                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3975                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3976                 priv->hw->pmt = priv->plat->pmt;
3977
3978                 /* TXCOE doesn't work in thresh DMA mode */
3979                 if (priv->plat->force_thresh_dma_mode)
3980                         priv->plat->tx_coe = 0;
3981                 else
3982                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3983
3984                 /* In case of GMAC4 rx_coe is from HW cap register. */
3985                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3986
3987                 if (priv->dma_cap.rx_coe_type2)
3988                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3989                 else if (priv->dma_cap.rx_coe_type1)
3990                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3991
3992         } else {
3993                 dev_info(priv->device, "No HW DMA feature register supported\n");
3994         }
3995
3996         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3997         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3998                 priv->hw->desc = &dwmac4_desc_ops;
3999         else
4000                 stmmac_selec_desc_mode(priv);
4001
4002         if (priv->plat->rx_coe) {
4003                 priv->hw->rx_csum = priv->plat->rx_coe;
4004                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4005                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4006                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4007         }
4008         if (priv->plat->tx_coe)
4009                 dev_info(priv->device, "TX Checksum insertion supported\n");
4010
4011         if (priv->plat->pmt) {
4012                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4013                 device_set_wakeup_capable(priv->device, 1);
4014         }
4015
4016         if (priv->dma_cap.tsoen)
4017                 dev_info(priv->device, "TSO supported\n");
4018
4019         return 0;
4020 }
4021
4022 /**
4023  * stmmac_dvr_probe
4024  * @device: device pointer
4025  * @plat_dat: platform data pointer
4026  * @res: stmmac resource pointer
4027  * Description: this is the main probe function used to
4028  * call the alloc_etherdev, allocate the priv structure.
4029  * Return:
4030  * returns 0 on success, otherwise errno.
4031  */
4032 int stmmac_dvr_probe(struct device *device,
4033                      struct plat_stmmacenet_data *plat_dat,
4034                      struct stmmac_resources *res)
4035 {
4036         struct net_device *ndev = NULL;
4037         struct stmmac_priv *priv;
4038         int ret = 0;
4039         u32 queue;
4040
4041         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4042                                   MTL_MAX_TX_QUEUES,
4043                                   MTL_MAX_RX_QUEUES);
4044         if (!ndev)
4045                 return -ENOMEM;
4046
4047         SET_NETDEV_DEV(ndev, device);
4048
4049         priv = netdev_priv(ndev);
4050         priv->device = device;
4051         priv->dev = ndev;
4052
4053         stmmac_set_ethtool_ops(ndev);
4054         priv->pause = pause;
4055         priv->plat = plat_dat;
4056         priv->ioaddr = res->addr;
4057         priv->dev->base_addr = (unsigned long)res->addr;
4058
4059         priv->dev->irq = res->irq;
4060         priv->wol_irq = res->wol_irq;
4061         priv->lpi_irq = res->lpi_irq;
4062
4063         if (res->mac)
4064                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4065
4066         dev_set_drvdata(device, priv->dev);
4067
4068         /* Verify driver arguments */
4069         stmmac_verify_args();
4070
4071         /* Override with kernel parameters if supplied XXX CRS XXX
4072          * this needs to have multiple instances
4073          */
4074         if ((phyaddr >= 0) && (phyaddr <= 31))
4075                 priv->plat->phy_addr = phyaddr;
4076
4077         if (priv->plat->stmmac_rst)
4078                 reset_control_deassert(priv->plat->stmmac_rst);
4079
4080         /* Init MAC and get the capabilities */
4081         ret = stmmac_hw_init(priv);
4082         if (ret)
4083                 goto error_hw_init;
4084
4085         /* Configure real RX and TX queues */
4086         ndev->real_num_rx_queues = priv->plat->rx_queues_to_use;
4087         ndev->real_num_tx_queues = priv->plat->tx_queues_to_use;
4088
4089         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
4090
4091         ndev->netdev_ops = &stmmac_netdev_ops;
4092
4093         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4094                             NETIF_F_RXCSUM;
4095
4096         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4097                 ndev->hw_features |= NETIF_F_TSO;
4098                 priv->tso = true;
4099                 dev_info(priv->device, "TSO feature enabled\n");
4100         }
4101         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4102         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4103 #ifdef STMMAC_VLAN_TAG_USED
4104         /* Both mac100 and gmac support receive VLAN tag detection */
4105         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4106 #endif
4107         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4108
4109         /* MTU range: 46 - hw-specific max */
4110         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4111         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4112                 ndev->max_mtu = JUMBO_LEN;
4113         else
4114                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4115         /* Do not overwrite ndev->max_mtu if plat->maxmtu is larger than
4116          * ndev->max_mtu or smaller than ndev->min_mtu (an invalid range).
4117          */
4118         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4119             (priv->plat->maxmtu >= ndev->min_mtu))
4120                 ndev->max_mtu = priv->plat->maxmtu;
4121         else if (priv->plat->maxmtu < ndev->min_mtu)
4122                 dev_warn(priv->device,
4123                          "%s: warning: maxmtu has an invalid value (%d)\n",
4124                          __func__, priv->plat->maxmtu);
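        /*
         * Worked example (values assumed for illustration only): on an
         * enhanced-descriptor core ndev->max_mtu starts at JUMBO_LEN; if the
         * platform passes maxmtu = 1500, the clamp above lowers ndev->max_mtu
         * to 1500, whereas maxmtu = 40 (below ETH_ZLEN - ETH_HLEN) only
         * triggers the warning and leaves ndev->max_mtu untouched.
         */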
4125
4126         if (flow_ctrl)
4127                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4128
4129         /* The RX watchdog is available on cores newer than 3.40.
4130          * In some cases, for example on buggy HW, this feature
4131          * has to be disabled; this can be done by setting the
4132          * riwt_off field in the platform data.
4133          */
4134         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4135                 priv->use_riwt = 1;
4136                 dev_info(priv->device,
4137                          "Enable RX Mitigation via HW Watchdog Timer\n");
4138         }
4139
4140         ret = alloc_dma_desc_resources(priv);
4141         if (ret < 0) {
4142                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4143                            __func__);
4144                 goto init_dma_error;
4145         }
4146
4147         ret = init_dma_desc_rings(priv->dev, GFP_KERNEL);
4148         if (ret < 0) {
4149                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4150                            __func__);
4151                 goto init_dma_error;
4152         }
4153
4154         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4155                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4156
4157                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4158                                (64 * priv->plat->rx_queues_to_use));
4159         }
4160
4161         spin_lock_init(&priv->lock);
4162
4163         /* If a specific clk_csr value is passed from the platform,
4164          * the CSR Clock Range selection is fixed and cannot be
4165          * changed at run-time. Otherwise the driver will try to
4166          * set the MDC clock dynamically according to the actual
4167          * CSR input clock.
4168          */
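        /*
         * Illustrative sketch (assumed platform glue, not part of this file):
         * a board that needs a fixed MDC clock range can pre-set the field
         * before calling stmmac_dvr_probe(), e.g.
         *
         *      plat_dat->clk_csr = STMMAC_CSR_100_150M;
         *
         * so the branch below keeps that value instead of letting
         * stmmac_clk_csr_set() derive it from the stmmac clock rate.
         */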
4169         if (!priv->plat->clk_csr)
4170                 stmmac_clk_csr_set(priv);
4171         else
4172                 priv->clk_csr = priv->plat->clk_csr;
4173
4174         stmmac_check_pcs_mode(priv);
4175
4176         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4177             priv->hw->pcs != STMMAC_PCS_TBI &&
4178             priv->hw->pcs != STMMAC_PCS_RTBI) {
4179                 /* MDIO bus Registration */
4180                 ret = stmmac_mdio_register(ndev);
4181                 if (ret < 0) {
4182                         dev_err(priv->device,
4183                                 "%s: MDIO bus (id: %d) registration failed\n",
4184                                 __func__, priv->plat->bus_id);
4185                         goto error_mdio_register;
4186                 }
4187         }
4188
4189         ret = register_netdev(ndev);
4190         if (ret) {
4191                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4192                         __func__, ret);
4193                 goto error_netdev_register;
4194         }
4195
4196         return ret;
4197
4198 error_netdev_register:
4199         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4200             priv->hw->pcs != STMMAC_PCS_TBI &&
4201             priv->hw->pcs != STMMAC_PCS_RTBI)
4202                 stmmac_mdio_unregister(ndev);
4203 error_mdio_register:
4204         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4205                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4206
4207                 netif_napi_del(&rx_q->napi);
4208         }
4209 init_dma_error:
4210         free_dma_desc_resources(priv);
4211 error_hw_init:
4212         free_netdev(ndev);
4213
4214         return ret;
4215 }
4216 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
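/*
 * Usage sketch (illustrative only; the probe function name below is
 * hypothetical, the helpers are those provided by stmmac_platform.c): a
 * platform glue driver is expected to collect its resources and platform
 * data and then hand them to stmmac_dvr_probe(), roughly as follows:
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 */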
4217
4218 /**
4219  * stmmac_dvr_remove
4220  * @dev: device pointer
4221  * Description: this function resets the TX/RX processes, disables the MAC
4222  * RX/TX, changes the link status and releases the DMA descriptor rings.
4223  */
4224 int stmmac_dvr_remove(struct device *dev)
4225 {
4226         struct net_device *ndev = dev_get_drvdata(dev);
4227         struct stmmac_priv *priv = netdev_priv(ndev);
4228
4229         netdev_info(priv->dev, "%s: removing driver", __func__);
4230
4231         stmmac_stop_all_dma(priv);
4232
4233         stmmac_set_mac(priv->ioaddr, false);
4234         netif_carrier_off(ndev);
4235         unregister_netdev(ndev);
4236         if (priv->plat->stmmac_rst)
4237                 reset_control_assert(priv->plat->stmmac_rst);
4238         clk_disable_unprepare(priv->plat->pclk);
4239         clk_disable_unprepare(priv->plat->stmmac_clk);
4240         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4241             priv->hw->pcs != STMMAC_PCS_TBI &&
4242             priv->hw->pcs != STMMAC_PCS_RTBI)
4243                 stmmac_mdio_unregister(ndev);
4244         free_netdev(ndev);
4245
4246         return 0;
4247 }
4248 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
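/*
 * Usage note (illustrative): glue drivers typically call this from their
 * platform .remove callback, i.e. stmmac_dvr_remove(&pdev->dev); afterwards
 * the net_device and the private data are freed and must not be touched.
 */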
4249
4250 /**
4251  * stmmac_suspend - suspend callback
4252  * @dev: device pointer
4253  * Description: this function suspends the device; it is called by the
4254  * platform driver to stop the network queues, release the driver
4255  * resources and program the PMT register (for Wake-on-LAN).
4256  */
4257 int stmmac_suspend(struct device *dev)
4258 {
4259         struct net_device *ndev = dev_get_drvdata(dev);
4260         struct stmmac_priv *priv = netdev_priv(ndev);
4261         unsigned long flags;
4262
4263         if (!ndev || !netif_running(ndev))
4264                 return 0;
4265
4266         if (ndev->phydev)
4267                 phy_stop(ndev->phydev);
4268
4269         spin_lock_irqsave(&priv->lock, flags);
4270
4271         netif_device_detach(ndev);
4272         stmmac_stop_all_queues(priv);
4273
4274         stmmac_disable_all_queues(priv);
4275
4276         /* Stop TX/RX DMA */
4277         stmmac_stop_all_dma(priv);
4278
4279         /* Enable Power down mode by programming the PMT regs */
4280         if (device_may_wakeup(priv->device)) {
4281                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4282                 priv->irq_wake = 1;
4283         } else {
4284                 stmmac_set_mac(priv->ioaddr, false);
4285                 pinctrl_pm_select_sleep_state(priv->device);
4286                 /* Disable the clocks since PMT wake-up is not used */
4287                 clk_disable(priv->plat->pclk);
4288                 clk_disable(priv->plat->stmmac_clk);
4289         }
4290         spin_unlock_irqrestore(&priv->lock, flags);
4291
4292         priv->oldlink = 0;
4293         priv->speed = SPEED_UNKNOWN;
4294         priv->oldduplex = DUPLEX_UNKNOWN;
4295         return 0;
4296 }
4297 EXPORT_SYMBOL_GPL(stmmac_suspend);
4298
4299 /**
4300  * stmmac_reset_queues_param - reset queue parameters
4301  * @dev: device pointer
4302  */
4303 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4304 {
4305         u32 rx_cnt = priv->plat->rx_queues_to_use;
4306         u32 tx_cnt = priv->plat->tx_queues_to_use;
4307         u32 queue;
4308
4309         for (queue = 0; queue < rx_cnt; queue++) {
4310                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4311
4312                 rx_q->cur_rx = 0;
4313                 rx_q->dirty_rx = 0;
4314         }
4315
4316         for (queue = 0; queue < tx_cnt; queue++) {
4317                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4318
4319                 tx_q->cur_tx = 0;
4320                 tx_q->dirty_tx = 0;
4321         }
4322 }
4323
4324 /**
4325  * stmmac_resume - resume callback
4326  * @dev: device pointer
4327  * Description: on resume this function is invoked to set up the DMA and the
4328  * core in a usable state.
4329  */
4330 int stmmac_resume(struct device *dev)
4331 {
4332         struct net_device *ndev = dev_get_drvdata(dev);
4333         struct stmmac_priv *priv = netdev_priv(ndev);
4334         unsigned long flags;
4335
4336         if (!netif_running(ndev))
4337                 return 0;
4338
4339         /* The Power Down bit in the PMT register is cleared
4340          * automatically as soon as a magic packet or a wake-up frame
4341          * is received. Nevertheless, it's better to clear
4342          * this bit manually because it can cause problems when resuming
4343          * from another device (e.g. the serial console).
4344          */
4345         if (device_may_wakeup(priv->device)) {
4346                 spin_lock_irqsave(&priv->lock, flags);
4347                 priv->hw->mac->pmt(priv->hw, 0);
4348                 spin_unlock_irqrestore(&priv->lock, flags);
4349                 priv->irq_wake = 0;
4350         } else {
4351                 pinctrl_pm_select_default_state(priv->device);
4352                 /* enable the clk previously disabled */
4353                 clk_enable(priv->plat->stmmac_clk);
4354                 clk_enable(priv->plat->pclk);
4355                 /* reset the phy so that it's ready */
4356                 if (priv->mii)
4357                         stmmac_mdio_reset(priv->mii);
4358         }
4359
4360         netif_device_attach(ndev);
4361
4362         spin_lock_irqsave(&priv->lock, flags);
4363
4364         stmmac_reset_queues_param(priv);
4365
4366         /* Reset the private MSS value to force MSS context programming
4367          * at the next TSO xmit (only used on GMAC4).
4368          */
4369         priv->mss = 0;
4370
4371         stmmac_clear_descriptors(priv);
4372
4373         stmmac_hw_setup(ndev, false);
4374         stmmac_init_tx_coalesce(priv);
4375         stmmac_set_rx_mode(ndev);
4376
4377         stmmac_enable_all_queues(priv);
4378
4379         stmmac_start_all_queues(priv);
4380
4381         spin_unlock_irqrestore(&priv->lock, flags);
4382
4383         if (ndev->phydev)
4384                 phy_start(ndev->phydev);
4385
4386         return 0;
4387 }
4388 EXPORT_SYMBOL_GPL(stmmac_resume);
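/*
 * Usage note (illustrative; the ops name is hypothetical): bus glue drivers
 * normally expose this suspend/resume pair through a dev_pm_ops table, e.g.
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);
 *
 * and point their driver structure's .pm field at it.
 */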
4389
4390 #ifndef MODULE
4391 static int __init stmmac_cmdline_opt(char *str)
4392 {
4393         char *opt;
4394
4395         if (!str || !*str)
4396                 return -EINVAL;
4397         while ((opt = strsep(&str, ",")) != NULL) {
4398                 if (!strncmp(opt, "debug:", 6)) {
4399                         if (kstrtoint(opt + 6, 0, &debug))
4400                                 goto err;
4401                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4402                         if (kstrtoint(opt + 8, 0, &phyaddr))
4403                                 goto err;
4404                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4405                         if (kstrtoint(opt + 7, 0, &buf_sz))
4406                                 goto err;
4407                 } else if (!strncmp(opt, "tc:", 3)) {
4408                         if (kstrtoint(opt + 3, 0, &tc))
4409                                 goto err;
4410                 } else if (!strncmp(opt, "watchdog:", 9)) {
4411                         if (kstrtoint(opt + 9, 0, &watchdog))
4412                                 goto err;
4413                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4414                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4415                                 goto err;
4416                 } else if (!strncmp(opt, "pause:", 6)) {
4417                         if (kstrtoint(opt + 6, 0, &pause))
4418                                 goto err;
4419                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4420                         if (kstrtoint(opt + 10, 0, &eee_timer))
4421                                 goto err;
4422                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4423                         if (kstrtoint(opt + 11, 0, &chain_mode))
4424                                 goto err;
4425                 }
4426         }
4427         return 0;
4428
4429 err:
4430         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4431         return -EINVAL;
4432 }
4433
4434 __setup("stmmaceth=", stmmac_cmdline_opt);
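/*
 * Example (values are hypothetical): when the driver is built in, the options
 * parsed above can be passed on the kernel command line as a single
 * comma-separated list of "name:value" pairs, e.g.
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,flow_ctrl:1,pause:65535
 */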
4435 #endif /* MODULE */
4436
4437 static int __init stmmac_init(void)
4438 {
4439 #ifdef CONFIG_DEBUG_FS
4440         /* Create debugfs main directory if it doesn't exist yet */
4441         if (!stmmac_fs_dir) {
4442                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4443
4444                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4445                         pr_err("ERROR %s, debugfs create directory failed\n",
4446                                STMMAC_RESOURCE_NAME);
4447
4448                         return -ENOMEM;
4449                 }
4450         }
4451 #endif
4452
4453         return 0;
4454 }
4455
4456 static void __exit stmmac_exit(void)
4457 {
4458 #ifdef CONFIG_DEBUG_FS
4459         debugfs_remove_recursive(stmmac_fs_dir);
4460 #endif
4461 }
4462
4463 module_init(stmmac_init)
4464 module_exit(stmmac_exit)
4465
4466 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4467 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4468 MODULE_LICENSE("GPL");