1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but the user can force the use of the chain instead of the ring
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148         u32 queue;
149
150         for (queue = 0; queue < rx_queues_cnt; queue++) {
151                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153                 napi_disable(&rx_q->napi);
154         }
155 }
156
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164         u32 queue;
165
166         for (queue = 0; queue < rx_queues_cnt; queue++) {
167                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169                 napi_enable(&rx_q->napi);
170         }
171 }
172
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180         u32 queue;
181
182         for (queue = 0; queue < tx_queues_cnt; queue++)
183                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *      If a specific clk_csr value is passed from the platform
206  *      this means that the CSR Clock Range selection cannot be
207  *      changed at run-time and it is fixed (as reported in the driver
208  * documentation). Otherwise, the driver will try to set the MDC
209  *      clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213         u32 clk_rate;
214
215         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216
217         /* The platform-provided default clk_csr is assumed valid for
218          * all cases except for the ones mentioned below.
219          * For values higher than the IEEE 802.3 specified frequency
220          * we cannot estimate the proper divider, as the frequency of
221          * clk_csr_i is not known. So we do not change the default
222          * divider.
223          */
224         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225                 if (clk_rate < CSR_F_35M)
226                         priv->clk_csr = STMMAC_CSR_20_35M;
227                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228                         priv->clk_csr = STMMAC_CSR_35_60M;
229                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230                         priv->clk_csr = STMMAC_CSR_60_100M;
231                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232                         priv->clk_csr = STMMAC_CSR_100_150M;
233                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234                         priv->clk_csr = STMMAC_CSR_150_250M;
235                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236                         priv->clk_csr = STMMAC_CSR_250_300M;
237         }
238 }
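
/* Editorial note (not from the original source): the STMMAC_CSR_* values
 * chosen above select the MDC clock divider applied to clk_csr_i (on
 * Synopsys GMAC cores typically /16, /26, /42, /62, /102 and /124 for the
 * six ranges, per the core databook) so that the resulting MDC frequency
 * stays within the roughly 2.5 MHz limit required by IEEE 802.3 for MDIO.
 */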
239
240 static void print_pkt(unsigned char *buf, int len)
241 {
242         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
243         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
244 }
245
246 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
247 {
248         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
249         u32 avail;
250
251         if (tx_q->dirty_tx > tx_q->cur_tx)
252                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
253         else
254                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
255
256         return avail;
257 }
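
/* Editorial worked example (illustrative, not part of the original file):
 * with a hypothetical DMA_TX_SIZE of 512, cur_tx = 10 and dirty_tx = 4 give
 * avail = 512 - 10 + 4 - 1 = 505, while cur_tx = 4 and dirty_tx = 10 give
 * avail = 10 - 4 - 1 = 5. The "- 1" keeps one descriptor slot unused so a
 * completely full ring is never confused with an empty one.
 */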
258
259 /**
260  * stmmac_rx_dirty - Get RX queue dirty
261  * @priv: driver private structure
262  * @queue: RX queue index
263  */
264 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
265 {
266         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
267         u32 dirty;
268
269         if (rx_q->dirty_rx <= rx_q->cur_rx)
270                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
271         else
272                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
273
274         return dirty;
275 }
276
277 /**
278  * stmmac_hw_fix_mac_speed - callback for speed selection
279  * @priv: driver private structure
280  * Description: on some platforms (e.g. ST), some HW system configuration
281  * registers have to be set according to the link speed negotiated.
282  */
283 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
284 {
285         struct net_device *ndev = priv->dev;
286         struct phy_device *phydev = ndev->phydev;
287
288         if (likely(priv->plat->fix_mac_speed))
289                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
290 }
291
292 /**
293  * stmmac_enable_eee_mode - check and enter LPI mode
294  * @priv: driver private structure
295  * Description: this function checks that all TX queues have finished their
296  * work and, if so, enters LPI mode when EEE is enabled.
297  */
298 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
299 {
300         u32 tx_cnt = priv->plat->tx_queues_to_use;
301         u32 queue;
302
303         /* check if all TX queues have the work finished */
304         for (queue = 0; queue < tx_cnt; queue++) {
305                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
306
307                 if (tx_q->dirty_tx != tx_q->cur_tx)
308                         return; /* still unfinished work */
309         }
310
311         /* Check and enter in LPI mode */
312         if (!priv->tx_path_in_lpi_mode)
313                 priv->hw->mac->set_eee_mode(priv->hw,
314                                             priv->plat->en_tx_lpi_clockgating);
315 }
316
317 /**
318  * stmmac_disable_eee_mode - disable and exit from LPI mode
319  * @priv: driver private structure
320  * Description: this function exits and disables EEE when the LPI state is
321  * active. It is called from the xmit path.
322  */
323 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
324 {
325         priv->hw->mac->reset_eee_mode(priv->hw);
326         del_timer_sync(&priv->eee_ctrl_timer);
327         priv->tx_path_in_lpi_mode = false;
328 }
329
330 /**
331  * stmmac_eee_ctrl_timer - EEE TX SW timer.
332  * @arg : data hook
333  * Description:
334  *  if there is no data transfer and if we are not in LPI state,
335  *  then the MAC transmitter can be moved to the LPI state.
336  */
337 static void stmmac_eee_ctrl_timer(unsigned long arg)
338 {
339         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
340
341         stmmac_enable_eee_mode(priv);
342         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
343 }
344
345 /**
346  * stmmac_eee_init - init EEE
347  * @priv: driver private structure
348  * Description:
349  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
350  *  can also manage EEE, this function enables the LPI state and starts the
351  *  related timer.
352  */
353 bool stmmac_eee_init(struct stmmac_priv *priv)
354 {
355         struct net_device *ndev = priv->dev;
356         unsigned long flags;
357         bool ret = false;
358
359         /* When using the PCS we cannot deal with the PHY registers at this
360          * stage, so we do not support extra features like EEE.
361          */
362         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
363             (priv->hw->pcs == STMMAC_PCS_TBI) ||
364             (priv->hw->pcs == STMMAC_PCS_RTBI))
365                 goto out;
366
367         /* MAC core supports the EEE feature. */
368         if (priv->dma_cap.eee) {
369                 int tx_lpi_timer = priv->tx_lpi_timer;
370
371                 /* Check if the PHY supports EEE */
372                 if (phy_init_eee(ndev->phydev, 1)) {
373                         /* Handle the case where EEE can no longer be
374                          * supported at run time (for example because the
375                          * link partner caps have changed).
376                          * In that case the driver disables its own timers.
377                          */
378                         spin_lock_irqsave(&priv->lock, flags);
379                         if (priv->eee_active) {
380                                 netdev_dbg(priv->dev, "disable EEE\n");
381                                 del_timer_sync(&priv->eee_ctrl_timer);
382                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
383                                                              tx_lpi_timer);
384                         }
385                         priv->eee_active = 0;
386                         spin_unlock_irqrestore(&priv->lock, flags);
387                         goto out;
388                 }
389                 /* Activate the EEE and start timers */
390                 spin_lock_irqsave(&priv->lock, flags);
391                 if (!priv->eee_active) {
392                         priv->eee_active = 1;
393                         setup_timer(&priv->eee_ctrl_timer,
394                                     stmmac_eee_ctrl_timer,
395                                     (unsigned long)priv);
396                         mod_timer(&priv->eee_ctrl_timer,
397                                   STMMAC_LPI_T(eee_timer));
398
399                         priv->hw->mac->set_eee_timer(priv->hw,
400                                                      STMMAC_DEFAULT_LIT_LS,
401                                                      tx_lpi_timer);
402                 }
403                 /* Set HW EEE according to the speed */
404                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
405
406                 ret = true;
407                 spin_unlock_irqrestore(&priv->lock, flags);
408
409                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
410         }
411 out:
412         return ret;
413 }
414
415 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
416  * @priv: driver private structure
417  * @p : descriptor pointer
418  * @skb : the socket buffer
419  * Description :
420  * This function reads the timestamp from the descriptor, passes it to the
421  * stack and also performs some sanity checks.
422  */
423 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
424                                    struct dma_desc *p, struct sk_buff *skb)
425 {
426         struct skb_shared_hwtstamps shhwtstamp;
427         u64 ns;
428
429         if (!priv->hwts_tx_en)
430                 return;
431
432         /* exit if skb doesn't support hw tstamp */
433         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
434                 return;
435
436         /* check tx tstamp status */
437         if (!priv->hw->desc->get_tx_timestamp_status(p)) {
438                 /* get the valid tstamp */
439                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
440
441                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
442                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
443
444                 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
445                 /* pass tstamp to stack */
446                 skb_tstamp_tx(skb, &shhwtstamp);
447         }
448
449         return;
450 }
451
452 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
453  * @priv: driver private structure
454  * @p : descriptor pointer
455  * @np : next descriptor pointer
456  * @skb : the socket buffer
457  * Description :
458  * This function reads the received packet's timestamp from the descriptor
459  * and passes it to the stack. It also performs some sanity checks.
460  */
461 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
462                                    struct dma_desc *np, struct sk_buff *skb)
463 {
464         struct skb_shared_hwtstamps *shhwtstamp = NULL;
465         u64 ns;
466
467         if (!priv->hwts_rx_en)
468                 return;
469
470         /* Check if timestamp is available */
471         if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
472                 /* For GMAC4, the valid timestamp is from CTX next desc. */
473                 if (priv->plat->has_gmac4)
474                         ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
475                 else
476                         ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
477
478                 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
479                 shhwtstamp = skb_hwtstamps(skb);
480                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
481                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
482         } else  {
483                 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
484         }
485 }
486
487 /**
488  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
489  *  @dev: device pointer.
490  *  @ifr: An IOCTL specific structure, that can contain a pointer to
491  *  a proprietary structure used to pass information to the driver.
492  *  Description:
493  *  This function configures the MAC to enable/disable both outgoing(TX)
494  *  and incoming(RX) packets time stamping based on user input.
495  *  Return Value:
496  *  0 on success and an appropriate -ve integer on failure.
497  */
498 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
499 {
500         struct stmmac_priv *priv = netdev_priv(dev);
501         struct hwtstamp_config config;
502         struct timespec64 now;
503         u64 temp = 0;
504         u32 ptp_v2 = 0;
505         u32 tstamp_all = 0;
506         u32 ptp_over_ipv4_udp = 0;
507         u32 ptp_over_ipv6_udp = 0;
508         u32 ptp_over_ethernet = 0;
509         u32 snap_type_sel = 0;
510         u32 ts_master_en = 0;
511         u32 ts_event_en = 0;
512         u32 value = 0;
513         u32 sec_inc;
514
515         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
516                 netdev_alert(priv->dev, "No support for HW time stamping\n");
517                 priv->hwts_tx_en = 0;
518                 priv->hwts_rx_en = 0;
519
520                 return -EOPNOTSUPP;
521         }
522
523         if (copy_from_user(&config, ifr->ifr_data,
524                            sizeof(struct hwtstamp_config)))
525                 return -EFAULT;
526
527         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
528                    __func__, config.flags, config.tx_type, config.rx_filter);
529
530         /* reserved for future extensions */
531         if (config.flags)
532                 return -EINVAL;
533
534         if (config.tx_type != HWTSTAMP_TX_OFF &&
535             config.tx_type != HWTSTAMP_TX_ON)
536                 return -ERANGE;
537
538         if (priv->adv_ts) {
539                 switch (config.rx_filter) {
540                 case HWTSTAMP_FILTER_NONE:
541                         /* do not time stamp any incoming packet */
542                         config.rx_filter = HWTSTAMP_FILTER_NONE;
543                         break;
544
545                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
546                         /* PTP v1, UDP, any kind of event packet */
547                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
548                         /* take time stamp for all event messages */
549                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
550
551                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
552                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
553                         break;
554
555                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
556                         /* PTP v1, UDP, Sync packet */
557                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
558                         /* take time stamp for SYNC messages only */
559                         ts_event_en = PTP_TCR_TSEVNTENA;
560
561                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563                         break;
564
565                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
566                         /* PTP v1, UDP, Delay_req packet */
567                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
568                         /* take time stamp for Delay_Req messages only */
569                         ts_master_en = PTP_TCR_TSMSTRENA;
570                         ts_event_en = PTP_TCR_TSEVNTENA;
571
572                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574                         break;
575
576                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
577                         /* PTP v2, UDP, any kind of event packet */
578                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
579                         ptp_v2 = PTP_TCR_TSVER2ENA;
580                         /* take time stamp for all event messages */
581                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582
583                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585                         break;
586
587                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
588                         /* PTP v2, UDP, Sync packet */
589                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
590                         ptp_v2 = PTP_TCR_TSVER2ENA;
591                         /* take time stamp for SYNC messages only */
592                         ts_event_en = PTP_TCR_TSEVNTENA;
593
594                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596                         break;
597
598                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
599                         /* PTP v2, UDP, Delay_req packet */
600                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
601                         ptp_v2 = PTP_TCR_TSVER2ENA;
602                         /* take time stamp for Delay_Req messages only */
603                         ts_master_en = PTP_TCR_TSMSTRENA;
604                         ts_event_en = PTP_TCR_TSEVNTENA;
605
606                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
607                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
608                         break;
609
610                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
611                         /* PTP v2/802.1AS, any layer, any kind of event packet */
612                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
613                         ptp_v2 = PTP_TCR_TSVER2ENA;
614                         /* take time stamp for all event messages */
615                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
616
617                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
619                         ptp_over_ethernet = PTP_TCR_TSIPENA;
620                         break;
621
622                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
623                         /* PTP v2/802.1AS, any layer, Sync packet */
624                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
625                         ptp_v2 = PTP_TCR_TSVER2ENA;
626                         /* take time stamp for SYNC messages only */
627                         ts_event_en = PTP_TCR_TSEVNTENA;
628
629                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631                         ptp_over_ethernet = PTP_TCR_TSIPENA;
632                         break;
633
634                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
635                         /* PTP v2/802.1AS, any layer, Delay_req packet */
636                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
637                         ptp_v2 = PTP_TCR_TSVER2ENA;
638                         /* take time stamp for Delay_Req messages only */
639                         ts_master_en = PTP_TCR_TSMSTRENA;
640                         ts_event_en = PTP_TCR_TSEVNTENA;
641
642                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644                         ptp_over_ethernet = PTP_TCR_TSIPENA;
645                         break;
646
647                 case HWTSTAMP_FILTER_NTP_ALL:
648                 case HWTSTAMP_FILTER_ALL:
649                         /* time stamp any incoming packet */
650                         config.rx_filter = HWTSTAMP_FILTER_ALL;
651                         tstamp_all = PTP_TCR_TSENALL;
652                         break;
653
654                 default:
655                         return -ERANGE;
656                 }
657         } else {
658                 switch (config.rx_filter) {
659                 case HWTSTAMP_FILTER_NONE:
660                         config.rx_filter = HWTSTAMP_FILTER_NONE;
661                         break;
662                 default:
663                         /* PTP v1, UDP, any kind of event packet */
664                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
665                         break;
666                 }
667         }
668         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
669         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
670
671         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
672                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
673         else {
674                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
675                          tstamp_all | ptp_v2 | ptp_over_ethernet |
676                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
677                          ts_master_en | snap_type_sel);
678                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
679
680                 /* program Sub Second Increment reg */
681                 sec_inc = priv->hw->ptp->config_sub_second_increment(
682                         priv->ptpaddr, priv->plat->clk_ptp_rate,
683                         priv->plat->has_gmac4);
684                 temp = div_u64(1000000000ULL, sec_inc);
685
686                 /* Calculate the default addend value:
687                  * the formula (matching the code below) is:
688                  * addend = (2^32 * freq_div_ratio) / clk_ptp_rate;
689                  * where freq_div_ratio = 1e9ns / sec_inc
690                  */
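                /* Editorial note (summary of the usual Synopsys fine-update
                 * scheme, not taken from this file): the addend programmed
                 * below is accumulated into a 32-bit register on every
                 * clk_ptp_ref cycle; each accumulator overflow advances the
                 * sub-second counter by sec_inc, so adjusting the addend is
                 * how the effective PTP clock frequency is tuned.
                 */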
691                 temp = (u64)(temp << 32);
692                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
693                 priv->hw->ptp->config_addend(priv->ptpaddr,
694                                              priv->default_addend);
695
696                 /* initialize system time */
697                 ktime_get_real_ts64(&now);
698
699                 /* lower 32 bits of tv_sec are safe until y2106 */
700                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
701                                             now.tv_nsec);
702         }
703
704         return copy_to_user(ifr->ifr_data, &config,
705                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
706 }
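
/* Editorial example (illustrative userspace sketch, not part of the driver):
 * enabling TX and RX hardware timestamping through the ioctl above uses the
 * standard SIOCSHWTSTAMP request (see <linux/net_tstamp.h> and
 * <linux/sockios.h>); the interface name "eth0" and the socket fd are
 * assumptions:
 *
 *      struct ifreq ifr = { 0 };
 *      struct hwtstamp_config cfg = { 0 };
 *
 *      cfg.tx_type = HWTSTAMP_TX_ON;
 *      cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *              perror("SIOCSHWTSTAMP");
 */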
707
708 /**
709  * stmmac_init_ptp - init PTP
710  * @priv: driver private structure
711  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
712  * This is done by looking at the HW cap. register.
713  * This function also registers the ptp driver.
714  */
715 static int stmmac_init_ptp(struct stmmac_priv *priv)
716 {
717         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
718                 return -EOPNOTSUPP;
719
720         priv->adv_ts = 0;
721         /* Check if adv_ts can be enabled for dwmac 4.x core */
722         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
723                 priv->adv_ts = 1;
724         /* Dwmac 3.x core with extend_desc can support adv_ts */
725         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
726                 priv->adv_ts = 1;
727
728         if (priv->dma_cap.time_stamp)
729                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
730
731         if (priv->adv_ts)
732                 netdev_info(priv->dev,
733                             "IEEE 1588-2008 Advanced Timestamp supported\n");
734
735         priv->hw->ptp = &stmmac_ptp;
736         priv->hwts_tx_en = 0;
737         priv->hwts_rx_en = 0;
738
739         stmmac_ptp_register(priv);
740
741         return 0;
742 }
743
744 static void stmmac_release_ptp(struct stmmac_priv *priv)
745 {
746         if (priv->plat->clk_ptp_ref)
747                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
748         stmmac_ptp_unregister(priv);
749 }
750
751 /**
752  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
753  *  @priv: driver private structure
754  *  Description: It is used for configuring the flow control in all queues
755  */
756 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
757 {
758         u32 tx_cnt = priv->plat->tx_queues_to_use;
759
760         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
761                                  priv->pause, tx_cnt);
762 }
763
764 /**
765  * stmmac_adjust_link - adjusts the link parameters
766  * @dev: net device structure
767  * Description: this is the helper called by the physical abstraction layer
768  * drivers to communicate the PHY link status. According to the speed and
769  * duplex, this driver can invoke the registered glue-logic as well.
770  * It also invokes the EEE initialization because the link may have moved
771  * to a different network that is EEE capable.
772  */
773 static void stmmac_adjust_link(struct net_device *dev)
774 {
775         struct stmmac_priv *priv = netdev_priv(dev);
776         struct phy_device *phydev = dev->phydev;
777         unsigned long flags;
778         int new_state = 0;
779
780         if (!phydev)
781                 return;
782
783         spin_lock_irqsave(&priv->lock, flags);
784
785         if (phydev->link) {
786                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
787
788                 /* Now we make sure that we can be in full duplex mode.
789                  * If not, we operate in half-duplex mode. */
790                 if (phydev->duplex != priv->oldduplex) {
791                         new_state = 1;
792                         if (!(phydev->duplex))
793                                 ctrl &= ~priv->hw->link.duplex;
794                         else
795                                 ctrl |= priv->hw->link.duplex;
796                         priv->oldduplex = phydev->duplex;
797                 }
798                 /* Flow Control operation */
799                 if (phydev->pause)
800                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
801
802                 if (phydev->speed != priv->speed) {
803                         new_state = 1;
804                         switch (phydev->speed) {
805                         case 1000:
806                                 if (priv->plat->has_gmac ||
807                                     priv->plat->has_gmac4)
808                                         ctrl &= ~priv->hw->link.port;
809                                 break;
810                         case 100:
811                                 if (priv->plat->has_gmac ||
812                                     priv->plat->has_gmac4) {
813                                         ctrl |= priv->hw->link.port;
814                                         ctrl |= priv->hw->link.speed;
815                                 } else {
816                                         ctrl &= ~priv->hw->link.port;
817                                 }
818                                 break;
819                         case 10:
820                                 if (priv->plat->has_gmac ||
821                                     priv->plat->has_gmac4) {
822                                         ctrl |= priv->hw->link.port;
823                                         ctrl &= ~(priv->hw->link.speed);
824                                 } else {
825                                         ctrl &= ~priv->hw->link.port;
826                                 }
827                                 break;
828                         default:
829                                 netif_warn(priv, link, priv->dev,
830                                            "broken speed: %d\n", phydev->speed);
831                                 phydev->speed = SPEED_UNKNOWN;
832                                 break;
833                         }
834                         if (phydev->speed != SPEED_UNKNOWN)
835                                 stmmac_hw_fix_mac_speed(priv);
836                         priv->speed = phydev->speed;
837                 }
838
839                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
840
841                 if (!priv->oldlink) {
842                         new_state = 1;
843                         priv->oldlink = 1;
844                 }
845         } else if (priv->oldlink) {
846                 new_state = 1;
847                 priv->oldlink = 0;
848                 priv->speed = SPEED_UNKNOWN;
849                 priv->oldduplex = DUPLEX_UNKNOWN;
850         }
851
852         if (new_state && netif_msg_link(priv))
853                 phy_print_status(phydev);
854
855         spin_unlock_irqrestore(&priv->lock, flags);
856
857         if (phydev->is_pseudo_fixed_link)
858                 /* Prevent the PHY layer from calling the link-adjust hook
859                  * in case a switch is attached to the stmmac driver.
860                  */
861                 phydev->irq = PHY_IGNORE_INTERRUPT;
862         else
863                 /* At this stage, init the EEE if supported.
864                  * Never called in case of fixed_link.
865                  */
866                 priv->eee_enabled = stmmac_eee_init(priv);
867 }
868
869 /**
870  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
871  * @priv: driver private structure
872  * Description: this is to verify if the HW supports the PCS.
873  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
874  * configured for the TBI, RTBI, or SGMII PHY interface.
875  */
876 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
877 {
878         int interface = priv->plat->interface;
879
880         if (priv->dma_cap.pcs) {
881                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
882                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
883                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
884                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
885                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
886                         priv->hw->pcs = STMMAC_PCS_RGMII;
887                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
888                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
889                         priv->hw->pcs = STMMAC_PCS_SGMII;
890                 }
891         }
892 }
893
894 /**
895  * stmmac_init_phy - PHY initialization
896  * @dev: net device structure
897  * Description: it initializes the driver's PHY state, and attaches the PHY
898  * to the mac driver.
899  *  Return value:
900  *  0 on success
901  */
902 static int stmmac_init_phy(struct net_device *dev)
903 {
904         struct stmmac_priv *priv = netdev_priv(dev);
905         struct phy_device *phydev;
906         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
907         char bus_id[MII_BUS_ID_SIZE];
908         int interface = priv->plat->interface;
909         int max_speed = priv->plat->max_speed;
910         priv->oldlink = 0;
911         priv->speed = SPEED_UNKNOWN;
912         priv->oldduplex = DUPLEX_UNKNOWN;
913
914         if (priv->plat->phy_node) {
915                 phydev = of_phy_connect(dev, priv->plat->phy_node,
916                                         &stmmac_adjust_link, 0, interface);
917         } else {
918                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
919                          priv->plat->bus_id);
920
921                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
922                          priv->plat->phy_addr);
923                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
924                            phy_id_fmt);
925
926                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
927                                      interface);
928         }
929
930         if (IS_ERR_OR_NULL(phydev)) {
931                 netdev_err(priv->dev, "Could not attach to PHY\n");
932                 if (!phydev)
933                         return -ENODEV;
934
935                 return PTR_ERR(phydev);
936         }
937
938         /* Stop Advertising 1000BASE Capability if interface is not GMII */
939         if ((interface == PHY_INTERFACE_MODE_MII) ||
940             (interface == PHY_INTERFACE_MODE_RMII) ||
941                 (max_speed < 1000 && max_speed > 0))
942                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
943                                          SUPPORTED_1000baseT_Full);
944
945         /*
946          * Broken HW is sometimes missing the pull-up resistor on the
947          * MDIO line, which results in reads to non-existent devices returning
948          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
949          * device as well.
950          * Note: phydev->phy_id is the result of reading the UID PHY registers.
951          */
952         if (!priv->plat->phy_node && phydev->phy_id == 0) {
953                 phy_disconnect(phydev);
954                 return -ENODEV;
955         }
956
957         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
958          * subsequent PHY polling; make sure we force a link transition if
959          * we have an UP/DOWN/UP transition.
960          */
961         if (phydev->is_pseudo_fixed_link)
962                 phydev->irq = PHY_POLL;
963
964         phy_attached_info(phydev);
965         return 0;
966 }
967
968 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
969 {
970         u32 rx_cnt = priv->plat->rx_queues_to_use;
971         void *head_rx;
972         u32 queue;
973
974         /* Display RX rings */
975         for (queue = 0; queue < rx_cnt; queue++) {
976                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
977
978                 pr_info("\tRX Queue %u rings\n", queue);
979
980                 if (priv->extend_desc)
981                         head_rx = (void *)rx_q->dma_erx;
982                 else
983                         head_rx = (void *)rx_q->dma_rx;
984
985                 /* Display RX ring */
986                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
987         }
988 }
989
990 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
991 {
992         u32 tx_cnt = priv->plat->tx_queues_to_use;
993         void *head_tx;
994         u32 queue;
995
996         /* Display TX rings */
997         for (queue = 0; queue < tx_cnt; queue++) {
998                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
999
1000                 pr_info("\tTX Queue %d rings\n", queue);
1001
1002                 if (priv->extend_desc)
1003                         head_tx = (void *)tx_q->dma_etx;
1004                 else
1005                         head_tx = (void *)tx_q->dma_tx;
1006
1007                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1008         }
1009 }
1010
1011 static void stmmac_display_rings(struct stmmac_priv *priv)
1012 {
1013         /* Display RX ring */
1014         stmmac_display_rx_rings(priv);
1015
1016         /* Display TX ring */
1017         stmmac_display_tx_rings(priv);
1018 }
1019
1020 static int stmmac_set_bfsize(int mtu, int bufsize)
1021 {
1022         int ret = bufsize;
1023
1024         if (mtu >= BUF_SIZE_4KiB)
1025                 ret = BUF_SIZE_8KiB;
1026         else if (mtu >= BUF_SIZE_2KiB)
1027                 ret = BUF_SIZE_4KiB;
1028         else if (mtu > DEFAULT_BUFSIZE)
1029                 ret = BUF_SIZE_2KiB;
1030         else
1031                 ret = DEFAULT_BUFSIZE;
1032
1033         return ret;
1034 }
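
/* Editorial note (illustrative, not from the original source, assuming the
 * usual 2 KiB / 4 KiB macro values): an MTU of 1500 keeps the DEFAULT_BUFSIZE
 * of 1536 bytes, an MTU just above 1536 selects BUF_SIZE_2KiB, an MTU of
 * 3000 selects BUF_SIZE_4KiB, and an MTU of 5000 selects BUF_SIZE_8KiB.
 */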
1035
1036 /**
1037  * stmmac_clear_rx_descriptors - clear RX descriptors
1038  * @priv: driver private structure
1039  * @queue: RX queue index
1040  * Description: this function is called to clear the RX descriptors
1041  * whether basic or extended descriptors are used.
1042  */
1043 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1044 {
1045         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1046         int i;
1047
1048         /* Clear the RX descriptors */
1049         for (i = 0; i < DMA_RX_SIZE; i++)
1050                 if (priv->extend_desc)
1051                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1052                                                      priv->use_riwt, priv->mode,
1053                                                      (i == DMA_RX_SIZE - 1));
1054                 else
1055                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1056                                                      priv->use_riwt, priv->mode,
1057                                                      (i == DMA_RX_SIZE - 1));
1058 }
1059
1060 /**
1061  * stmmac_clear_tx_descriptors - clear tx descriptors
1062  * @priv: driver private structure
1063  * @queue: TX queue index.
1064  * Description: this function is called to clear the TX descriptors
1065  * whether basic or extended descriptors are used.
1066  */
1067 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1068 {
1069         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1070         int i;
1071
1072         /* Clear the TX descriptors */
1073         for (i = 0; i < DMA_TX_SIZE; i++)
1074                 if (priv->extend_desc)
1075                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1076                                                      priv->mode,
1077                                                      (i == DMA_TX_SIZE - 1));
1078                 else
1079                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1080                                                      priv->mode,
1081                                                      (i == DMA_TX_SIZE - 1));
1082 }
1083
1084 /**
1085  * stmmac_clear_descriptors - clear descriptors
1086  * @priv: driver private structure
1087  * Description: this function is called to clear the TX and RX descriptors
1088  * whether basic or extended descriptors are used.
1089  */
1090 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1091 {
1092         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1093         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1094         u32 queue;
1095
1096         /* Clear the RX descriptors */
1097         for (queue = 0; queue < rx_queue_cnt; queue++)
1098                 stmmac_clear_rx_descriptors(priv, queue);
1099
1100         /* Clear the TX descriptors */
1101         for (queue = 0; queue < tx_queue_cnt; queue++)
1102                 stmmac_clear_tx_descriptors(priv, queue);
1103 }
1104
1105 /**
1106  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1107  * @priv: driver private structure
1108  * @p: descriptor pointer
1109  * @i: descriptor index
1110  * @flags: gfp flag
1111  * @queue: RX queue index
1112  * Description: this function is called to allocate a receive buffer, perform
1113  * the DMA mapping and init the descriptor.
1114  */
1115 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1116                                   int i, gfp_t flags, u32 queue)
1117 {
1118         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1119         struct sk_buff *skb;
1120
1121         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1122         if (!skb) {
1123                 netdev_err(priv->dev,
1124                            "%s: Rx init fails; skb is NULL\n", __func__);
1125                 return -ENOMEM;
1126         }
1127         rx_q->rx_skbuff[i] = skb;
1128         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1129                                                 priv->dma_buf_sz,
1130                                                 DMA_FROM_DEVICE);
1131         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1132                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1133                 dev_kfree_skb_any(skb);
1134                 return -EINVAL;
1135         }
1136
1137         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1138                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1139         else
1140                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1141
1142         if ((priv->hw->mode->init_desc3) &&
1143             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1144                 priv->hw->mode->init_desc3(p);
1145
1146         return 0;
1147 }
1148
1149 /**
1150  * stmmac_free_rx_buffer - free RX dma buffers
1151  * @priv: private structure
1152  * @queue: RX queue index
1153  * @i: buffer index.
1154  */
1155 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1156 {
1157         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1158
1159         if (rx_q->rx_skbuff[i]) {
1160                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1161                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1162                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1163         }
1164         rx_q->rx_skbuff[i] = NULL;
1165 }
1166
1167 /**
1168  * stmmac_free_tx_buffer - free TX dma buffers
1169  * @priv: private structure
1170  * @queue: TX queue index
1171  * @i: buffer index.
1172  */
1173 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1174 {
1175         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1176
1177         if (tx_q->tx_skbuff_dma[i].buf) {
1178                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1179                         dma_unmap_page(priv->device,
1180                                        tx_q->tx_skbuff_dma[i].buf,
1181                                        tx_q->tx_skbuff_dma[i].len,
1182                                        DMA_TO_DEVICE);
1183                 else
1184                         dma_unmap_single(priv->device,
1185                                          tx_q->tx_skbuff_dma[i].buf,
1186                                          tx_q->tx_skbuff_dma[i].len,
1187                                          DMA_TO_DEVICE);
1188         }
1189
1190         if (tx_q->tx_skbuff[i]) {
1191                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1192                 tx_q->tx_skbuff[i] = NULL;
1193                 tx_q->tx_skbuff_dma[i].buf = 0;
1194                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1195         }
1196 }
1197
1198 /**
1199  * init_dma_rx_desc_rings - init the RX descriptor rings
1200  * @dev: net device structure
1201  * @flags: gfp flag.
1202  * Description: this function initializes the DMA RX descriptors
1203  * and allocates the socket buffers. It supports the chained and ring
1204  * modes.
1205  */
1206 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1207 {
1208         struct stmmac_priv *priv = netdev_priv(dev);
1209         u32 rx_count = priv->plat->rx_queues_to_use;
1210         unsigned int bfsize = 0;
1211         int ret = -ENOMEM;
1212         u32 queue;
1213         int i;
1214
1215         if (priv->hw->mode->set_16kib_bfsize)
1216                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1217
1218         if (bfsize < BUF_SIZE_16KiB)
1219                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1220
1221         priv->dma_buf_sz = bfsize;
1222
1223         /* RX INITIALIZATION */
1224         netif_dbg(priv, probe, priv->dev,
1225                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1226
1227         for (queue = 0; queue < rx_count; queue++) {
1228                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1229
1230                 netif_dbg(priv, probe, priv->dev,
1231                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1232                           (u32)rx_q->dma_rx_phy);
1233
1234                 for (i = 0; i < DMA_RX_SIZE; i++) {
1235                         struct dma_desc *p;
1236
1237                         if (priv->extend_desc)
1238                                 p = &((rx_q->dma_erx + i)->basic);
1239                         else
1240                                 p = rx_q->dma_rx + i;
1241
1242                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1243                                                      queue);
1244                         if (ret)
1245                                 goto err_init_rx_buffers;
1246
1247                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1248                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1249                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1250                 }
1251
1252                 rx_q->cur_rx = 0;
1253                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1254
1255                 stmmac_clear_rx_descriptors(priv, queue);
1256
1257                 /* Setup the chained descriptor addresses */
1258                 if (priv->mode == STMMAC_CHAIN_MODE) {
1259                         if (priv->extend_desc)
1260                                 priv->hw->mode->init(rx_q->dma_erx,
1261                                                      rx_q->dma_rx_phy,
1262                                                      DMA_RX_SIZE, 1);
1263                         else
1264                                 priv->hw->mode->init(rx_q->dma_rx,
1265                                                      rx_q->dma_rx_phy,
1266                                                      DMA_RX_SIZE, 0);
1267                 }
1268         }
1269
1270         buf_sz = bfsize;
1271
1272         return 0;
1273
1274 err_init_rx_buffers:
1275         while (queue >= 0) {
1276                 while (--i >= 0)
1277                         stmmac_free_rx_buffer(priv, queue, i);
1278
1279                 if (queue == 0)
1280                         break;
1281
1282                 i = DMA_RX_SIZE;
1283                 queue--;
1284         }
1285
1286         return ret;
1287 }
1288
1289 /**
1290  * init_dma_tx_desc_rings - init the TX descriptor rings
1291  * @dev: net device structure.
1292  * Description: this function initializes the DMA TX descriptors
1293  * and allocates the socket buffers. It supports the chained and ring
1294  * modes.
1295  */
1296 static int init_dma_tx_desc_rings(struct net_device *dev)
1297 {
1298         struct stmmac_priv *priv = netdev_priv(dev);
1299         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1300         u32 queue;
1301         int i;
1302
1303         for (queue = 0; queue < tx_queue_cnt; queue++) {
1304                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1305
1306                 netif_dbg(priv, probe, priv->dev,
1307                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1308                          (u32)tx_q->dma_tx_phy);
1309
1310                 /* Setup the chained descriptor addresses */
1311                 if (priv->mode == STMMAC_CHAIN_MODE) {
1312                         if (priv->extend_desc)
1313                                 priv->hw->mode->init(tx_q->dma_etx,
1314                                                      tx_q->dma_tx_phy,
1315                                                      DMA_TX_SIZE, 1);
1316                         else
1317                                 priv->hw->mode->init(tx_q->dma_tx,
1318                                                      tx_q->dma_tx_phy,
1319                                                      DMA_TX_SIZE, 0);
1320                 }
1321
1322                 for (i = 0; i < DMA_TX_SIZE; i++) {
1323                         struct dma_desc *p;
1324                         if (priv->extend_desc)
1325                                 p = &((tx_q->dma_etx + i)->basic);
1326                         else
1327                                 p = tx_q->dma_tx + i;
1328
1329                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1330                                 p->des0 = 0;
1331                                 p->des1 = 0;
1332                                 p->des2 = 0;
1333                                 p->des3 = 0;
1334                         } else {
1335                                 p->des2 = 0;
1336                         }
1337
1338                         tx_q->tx_skbuff_dma[i].buf = 0;
1339                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1340                         tx_q->tx_skbuff_dma[i].len = 0;
1341                         tx_q->tx_skbuff_dma[i].last_segment = false;
1342                         tx_q->tx_skbuff[i] = NULL;
1343                 }
1344
1345                 tx_q->dirty_tx = 0;
1346                 tx_q->cur_tx = 0;
1347
1348                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1349         }
1350
1351         return 0;
1352 }
1353
1354 /**
1355  * init_dma_desc_rings - init the RX/TX descriptor rings
1356  * @dev: net device structure
1357  * @flags: gfp flag.
1358  * Description: this function initializes the DMA RX/TX descriptors
1359  * and allocates the socket buffers. It supports the chained and ring
1360  * modes.
1361  */
1362 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1363 {
1364         struct stmmac_priv *priv = netdev_priv(dev);
1365         int ret;
1366
1367         ret = init_dma_rx_desc_rings(dev, flags);
1368         if (ret)
1369                 return ret;
1370
1371         ret = init_dma_tx_desc_rings(dev);
1372
1373         stmmac_clear_descriptors(priv);
1374
1375         if (netif_msg_hw(priv))
1376                 stmmac_display_rings(priv);
1377
1378         return ret;
1379 }
1380
1381 /**
1382  * dma_free_rx_skbufs - free RX dma buffers
1383  * @priv: private structure
1384  * @queue: RX queue index
1385  */
1386 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1387 {
1388         int i;
1389
1390         for (i = 0; i < DMA_RX_SIZE; i++)
1391                 stmmac_free_rx_buffer(priv, queue, i);
1392 }
1393
1394 /**
1395  * dma_free_tx_skbufs - free TX dma buffers
1396  * @priv: private structure
1397  * @queue: TX queue index
1398  */
1399 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1400 {
1401         int i;
1402
1403         for (i = 0; i < DMA_TX_SIZE; i++)
1404                 stmmac_free_tx_buffer(priv, queue, i);
1405 }
1406
1407 /**
1408  * free_dma_rx_desc_resources - free RX dma desc resources
1409  * @priv: private structure
1410  */
1411 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1412 {
1413         u32 rx_count = priv->plat->rx_queues_to_use;
1414         u32 queue;
1415
1416         /* Free RX queue resources */
1417         for (queue = 0; queue < rx_count; queue++) {
1418                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1419
1420                 /* Release the DMA RX socket buffers */
1421                 dma_free_rx_skbufs(priv, queue);
1422
1423                 /* Free DMA regions of consistent memory previously allocated */
1424                 if (!priv->extend_desc)
1425                         dma_free_coherent(priv->device,
1426                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1427                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1428                 else
1429                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1430                                           sizeof(struct dma_extended_desc),
1431                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1432
1433                 kfree(rx_q->rx_skbuff_dma);
1434                 kfree(rx_q->rx_skbuff);
1435         }
1436 }
1437
1438 /**
1439  * free_dma_tx_desc_resources - free TX dma desc resources
1440  * @priv: private structure
1441  */
1442 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1443 {
1444         u32 tx_count = priv->plat->tx_queues_to_use;
1445         u32 queue = 0;
1446
1447         /* Free TX queue resources */
1448         for (queue = 0; queue < tx_count; queue++) {
1449                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1450
1451                 /* Release the DMA TX socket buffers */
1452                 dma_free_tx_skbufs(priv, queue);
1453
1454                 /* Free DMA regions of consistent memory previously allocated */
1455                 if (!priv->extend_desc)
1456                         dma_free_coherent(priv->device,
1457                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1458                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1459                 else
1460                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1461                                           sizeof(struct dma_extended_desc),
1462                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1463
1464                 kfree(tx_q->tx_skbuff_dma);
1465                 kfree(tx_q->tx_skbuff);
1466         }
1467 }
1468
1469 /**
1470  * alloc_dma_rx_desc_resources - alloc RX resources.
1471  * @priv: private structure
1472  * Description: according to which descriptor can be used (extended or basic)
1473  * this function allocates the resources for the RX path. The RX socket
1474  * buffers are pre-allocated in order to allow a zero-copy
1475  * mechanism.
1476  */
1477 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1478 {
1479         u32 rx_count = priv->plat->rx_queues_to_use;
1480         int ret = -ENOMEM;
1481         u32 queue;
1482
1483         /* RX queues buffers and DMA */
1484         for (queue = 0; queue < rx_count; queue++) {
1485                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1486
1487                 rx_q->queue_index = queue;
1488                 rx_q->priv_data = priv;
1489
1490                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1491                                                     sizeof(dma_addr_t),
1492                                                     GFP_KERNEL);
1493                 if (!rx_q->rx_skbuff_dma)
1494                         return -ENOMEM;
1495
1496                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1497                                                 sizeof(struct sk_buff *),
1498                                                 GFP_KERNEL);
1499                 if (!rx_q->rx_skbuff)
1500                         goto err_dma;
1501
1502                 if (priv->extend_desc) {
1503                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1504                                                             DMA_RX_SIZE *
1505                                                             sizeof(struct
1506                                                             dma_extended_desc),
1507                                                             &rx_q->dma_rx_phy,
1508                                                             GFP_KERNEL);
1509                         if (!rx_q->dma_erx)
1510                                 goto err_dma;
1511
1512                 } else {
1513                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1514                                                            DMA_RX_SIZE *
1515                                                            sizeof(struct
1516                                                            dma_desc),
1517                                                            &rx_q->dma_rx_phy,
1518                                                            GFP_KERNEL);
1519                         if (!rx_q->dma_rx)
1520                                 goto err_dma;
1521                 }
1522         }
1523
1524         return 0;
1525
1526 err_dma:
1527         free_dma_rx_desc_resources(priv);
1528
1529         return ret;
1530 }
1531
1532 /**
1533  * alloc_dma_tx_desc_resources - alloc TX resources.
1534  * @priv: private structure
1535  * Description: according to which descriptor can be used (extended or basic)
1536  * this function allocates the resources for the TX path: the descriptor
1537  * rings and the per-descriptor bookkeeping used to track mapped
1538  * buffers at transmission time.
1539  */
1540 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1541 {
1542         u32 tx_count = priv->plat->tx_queues_to_use;
1543         int ret = -ENOMEM;
1544         u32 queue;
1545
1546         /* TX queues buffers and DMA */
1547         for (queue = 0; queue < tx_count; queue++) {
1548                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1549
1550                 tx_q->queue_index = queue;
1551                 tx_q->priv_data = priv;
1552
1553                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1554                                                     sizeof(*tx_q->tx_skbuff_dma),
1555                                                     GFP_KERNEL);
1556                 if (!tx_q->tx_skbuff_dma)
1557                         return -ENOMEM;
1558
1559                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1560                                                 sizeof(struct sk_buff *),
1561                                                 GFP_KERNEL);
1562                 if (!tx_q->tx_skbuff)
1563                         goto err_dma_buffers;
1564
1565                 if (priv->extend_desc) {
1566                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1567                                                             DMA_TX_SIZE *
1568                                                             sizeof(struct
1569                                                             dma_extended_desc),
1570                                                             &tx_q->dma_tx_phy,
1571                                                             GFP_KERNEL);
1572                         if (!tx_q->dma_etx)
1573                                 goto err_dma_buffers;
1574                 } else {
1575                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1576                                                            DMA_TX_SIZE *
1577                                                            sizeof(struct
1578                                                                   dma_desc),
1579                                                            &tx_q->dma_tx_phy,
1580                                                            GFP_KERNEL);
1581                         if (!tx_q->dma_tx)
1582                                 goto err_dma_buffers;
1583                 }
1584         }
1585
1586         return 0;
1587
1588 err_dma_buffers:
1589         free_dma_tx_desc_resources(priv);
1590
1591         return ret;
1592 }
1593
1594 /**
1595  * alloc_dma_desc_resources - alloc TX/RX resources.
1596  * @priv: private structure
1597  * Description: according to which descriptor can be used (extended or basic)
1598  * this function allocates the resources for TX and RX paths. In case of
1599  * reception, for example, it pre-allocates the RX socket buffers in order to
1600  * allow a zero-copy mechanism.
1601  */
1602 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1603 {
1604         /* RX Allocation */
1605         int ret = alloc_dma_rx_desc_resources(priv);
1606
1607         if (ret)
1608                 return ret;
1609
1610         ret = alloc_dma_tx_desc_resources(priv);
1611
1612         return ret;
1613 }
1614
1615 /**
1616  * free_dma_desc_resources - free dma desc resources
1617  * @priv: private structure
1618  */
1619 static void free_dma_desc_resources(struct stmmac_priv *priv)
1620 {
1621         /* Release the DMA RX socket buffers */
1622         free_dma_rx_desc_resources(priv);
1623
1624         /* Release the DMA TX socket buffers */
1625         free_dma_tx_desc_resources(priv);
1626 }
1627
1628 /**
1629  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1630  *  @priv: driver private structure
1631  *  Description: It is used for enabling the rx queues in the MAC
1632  */
1633 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1634 {
1635         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1636         int queue;
1637         u8 mode;
1638
1639         for (queue = 0; queue < rx_queues_count; queue++) {
1640                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1641                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1642         }
1643 }
1644
1645 /**
1646  * stmmac_start_rx_dma - start RX DMA channel
1647  * @priv: driver private structure
1648  * @chan: RX channel index
1649  * Description:
1650  * This starts a RX DMA channel
1651  */
1652 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1653 {
1654         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1655         priv->hw->dma->start_rx(priv->ioaddr, chan);
1656 }
1657
1658 /**
1659  * stmmac_start_tx_dma - start TX DMA channel
1660  * @priv: driver private structure
1661  * @chan: TX channel index
1662  * Description:
1663  * This starts a TX DMA channel
1664  */
1665 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1666 {
1667         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1668         priv->hw->dma->start_tx(priv->ioaddr, chan);
1669 }
1670
1671 /**
1672  * stmmac_stop_rx_dma - stop RX DMA channel
1673  * @priv: driver private structure
1674  * @chan: RX channel index
1675  * Description:
1676  * This stops a RX DMA channel
1677  */
1678 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1679 {
1680         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1681         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1682 }
1683
1684 /**
1685  * stmmac_stop_tx_dma - stop TX DMA channel
1686  * @priv: driver private structure
1687  * @chan: TX channel index
1688  * Description:
1689  * This stops a TX DMA channel
1690  */
1691 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1692 {
1693         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1694         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1695 }
1696
1697 /**
1698  * stmmac_start_all_dma - start all RX and TX DMA channels
1699  * @priv: driver private structure
1700  * Description:
1701  * This starts all the RX and TX DMA channels
1702  */
1703 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1704 {
1705         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1706         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1707         u32 chan = 0;
1708
1709         for (chan = 0; chan < rx_channels_count; chan++)
1710                 stmmac_start_rx_dma(priv, chan);
1711
1712         for (chan = 0; chan < tx_channels_count; chan++)
1713                 stmmac_start_tx_dma(priv, chan);
1714 }
1715
1716 /**
1717  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1718  * @priv: driver private structure
1719  * Description:
1720  * This stops the RX and TX DMA channels
1721  */
1722 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1723 {
1724         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1725         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1726         u32 chan = 0;
1727
1728         for (chan = 0; chan < rx_channels_count; chan++)
1729                 stmmac_stop_rx_dma(priv, chan);
1730
1731         for (chan = 0; chan < tx_channels_count; chan++)
1732                 stmmac_stop_tx_dma(priv, chan);
1733 }
1734
1735 /**
1736  *  stmmac_dma_operation_mode - HW DMA operation mode
1737  *  @priv: driver private structure
1738  *  Description: it is used for configuring the DMA operation mode register in
1739  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1740  */
1741 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1742 {
1743         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1744         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1745         int rxfifosz = priv->plat->rx_fifo_size;
1746         u32 txmode = 0;
1747         u32 rxmode = 0;
1748         u32 chan = 0;
1749
1750         if (rxfifosz == 0)
1751                 rxfifosz = priv->dma_cap.rx_fifo_size;
1752
1753         if (priv->plat->force_thresh_dma_mode) {
1754                 txmode = tc;
1755                 rxmode = tc;
1756         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1757                 /*
1758                  * In case of GMAC, SF mode can be enabled
1759                  * to perform the TX COE in HW. This depends on:
1760                  * 1) TX COE actually being supported
1761                  * 2) there being no buggy Jumbo frame support
1762                  *    that requires not inserting the csum in the TDES.
1763                  */
1764                 txmode = SF_DMA_MODE;
1765                 rxmode = SF_DMA_MODE;
1766                 priv->xstats.threshold = SF_DMA_MODE;
1767         } else {
1768                 txmode = tc;
1769                 rxmode = SF_DMA_MODE;
1770         }
1771
1772         /* configure all channels */
1773         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1774                 for (chan = 0; chan < rx_channels_count; chan++)
1775                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1776                                                    rxfifosz);
1777
1778                 for (chan = 0; chan < tx_channels_count; chan++)
1779                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1780         } else {
1781                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1782                                         rxfifosz);
1783         }
1784 }
1785
1786 /**
1787  * stmmac_tx_clean - to manage the transmission completion
1788  * @priv: driver private structure
1789  * @queue: TX queue index
1790  * Description: it reclaims the transmit resources after transmission completes.
1791  */
1792 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1793 {
1794         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1795         unsigned int bytes_compl = 0, pkts_compl = 0;
1796         unsigned int entry = tx_q->dirty_tx;
1797
1798         netif_tx_lock(priv->dev);
1799
1800         priv->xstats.tx_clean++;
1801
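             /* Walk the ring from dirty_tx towards cur_tx, releasing every
              * descriptor already processed by the hardware; stop as soon as
              * a descriptor is still owned by the DMA.
              */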
1802         while (entry != tx_q->cur_tx) {
1803                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1804                 struct dma_desc *p;
1805                 int status;
1806
1807                 if (priv->extend_desc)
1808                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1809                 else
1810                         p = tx_q->dma_tx + entry;
1811
1812                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1813                                                       &priv->xstats, p,
1814                                                       priv->ioaddr);
1815                 /* Check if the descriptor is owned by the DMA */
1816                 if (unlikely(status & tx_dma_own))
1817                         break;
1818
1819                 /* Just consider the last segment and ...*/
1820                 if (likely(!(status & tx_not_ls))) {
1821                         /* ... verify the status error condition */
1822                         if (unlikely(status & tx_err)) {
1823                                 priv->dev->stats.tx_errors++;
1824                         } else {
1825                                 priv->dev->stats.tx_packets++;
1826                                 priv->xstats.tx_pkt_n++;
1827                         }
1828                         stmmac_get_tx_hwtstamp(priv, p, skb);
1829                 }
1830
1831                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1832                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1833                                 dma_unmap_page(priv->device,
1834                                                tx_q->tx_skbuff_dma[entry].buf,
1835                                                tx_q->tx_skbuff_dma[entry].len,
1836                                                DMA_TO_DEVICE);
1837                         else
1838                                 dma_unmap_single(priv->device,
1839                                                  tx_q->tx_skbuff_dma[entry].buf,
1840                                                  tx_q->tx_skbuff_dma[entry].len,
1841                                                  DMA_TO_DEVICE);
1842                         tx_q->tx_skbuff_dma[entry].buf = 0;
1843                         tx_q->tx_skbuff_dma[entry].len = 0;
1844                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1845                 }
1846
1847                 if (priv->hw->mode->clean_desc3)
1848                         priv->hw->mode->clean_desc3(tx_q, p);
1849
1850                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1851                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1852
1853                 if (likely(skb != NULL)) {
1854                         pkts_compl++;
1855                         bytes_compl += skb->len;
1856                         dev_consume_skb_any(skb);
1857                         tx_q->tx_skbuff[entry] = NULL;
1858                 }
1859
1860                 priv->hw->desc->release_tx_desc(p, priv->mode);
1861
1862                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1863         }
1864         tx_q->dirty_tx = entry;
1865
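             /* Report the completed packets/bytes to BQL for this queue */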
1866         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1867                                   pkts_compl, bytes_compl);
1868
1869         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1870                                                                 queue))) &&
1871             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1872
1873                 netif_dbg(priv, tx_done, priv->dev,
1874                           "%s: restart transmit\n", __func__);
1875                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1876         }
1877
1878         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1879                 stmmac_enable_eee_mode(priv);
1880                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1881         }
1882         netif_tx_unlock(priv->dev);
1883 }
1884
1885 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1886 {
1887         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1888 }
1889
1890 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1891 {
1892         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1893 }
1894
1895 /**
1896  * stmmac_tx_err - to manage the tx error
1897  * @priv: driver private structure
1898  * @chan: channel index
1899  * Description: it cleans the descriptors and restarts the transmission
1900  * in case of transmission errors.
1901  */
1902 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1903 {
1904         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1905         int i;
1906
1907         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1908
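             /* Error recovery: stop the channel's DMA, drop all pending TX
              * buffers, re-init every descriptor, reset the ring indices and
              * restart the DMA before waking the queue again.
              */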
1909         stmmac_stop_tx_dma(priv, chan);
1910         dma_free_tx_skbufs(priv, chan);
1911         for (i = 0; i < DMA_TX_SIZE; i++)
1912                 if (priv->extend_desc)
1913                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1914                                                      priv->mode,
1915                                                      (i == DMA_TX_SIZE - 1));
1916                 else
1917                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1918                                                      priv->mode,
1919                                                      (i == DMA_TX_SIZE - 1));
1920         tx_q->dirty_tx = 0;
1921         tx_q->cur_tx = 0;
1922         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1923         stmmac_start_tx_dma(priv, chan);
1924
1925         priv->dev->stats.tx_errors++;
1926         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1927 }
1928
1929 /**
1930  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1931  *  @priv: driver private structure
1932  *  @txmode: TX operating mode
1933  *  @rxmode: RX operating mode
1934  *  @chan: channel index
1935  *  Description: it is used for configuring the DMA operation mode at
1936  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1937  *  mode.
1938  */
1939 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1940                                           u32 rxmode, u32 chan)
1941 {
1942         int rxfifosz = priv->plat->rx_fifo_size;
1943
1944         if (rxfifosz == 0)
1945                 rxfifosz = priv->dma_cap.rx_fifo_size;
1946
1947         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1948                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1949                                            rxfifosz);
1950                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1951         } else {
1952                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1953                                         rxfifosz);
1954         }
1955 }
1956
1957 /**
1958  * stmmac_dma_interrupt - DMA ISR
1959  * @priv: driver private structure
1960  * Description: this is the DMA ISR. It is called by the main ISR.
1961  * It calls the dwmac dma routine and schedules the poll method in case
1962  * there is work to be done.
1963  */
1964 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1965 {
1966         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1967         int status;
1968         u32 chan;
1969
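             /* RX and TX channels are handled in pairs here: the NAPI
              * instance of the matching RX queue is scheduled for both RX
              * and TX work signalled on this channel.
              */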
1970         for (chan = 0; chan < tx_channel_count; chan++) {
1971                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1972
1973                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1974                                                       &priv->xstats, chan);
1975                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1976                         if (likely(napi_schedule_prep(&rx_q->napi))) {
1977                                 stmmac_disable_dma_irq(priv, chan);
1978                                 __napi_schedule(&rx_q->napi);
1979                         }
1980                 }
1981
1982                 if (unlikely(status & tx_hard_error_bump_tc)) {
1983                         /* Try to bump up the dma threshold on this failure */
1984                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1985                             (tc <= 256)) {
1986                                 tc += 64;
1987                                 if (priv->plat->force_thresh_dma_mode)
1988                                         stmmac_set_dma_operation_mode(priv,
1989                                                                       tc,
1990                                                                       tc,
1991                                                                       chan);
1992                                 else
1993                                         stmmac_set_dma_operation_mode(priv,
1994                                                                     tc,
1995                                                                     SF_DMA_MODE,
1996                                                                     chan);
1997                                 priv->xstats.threshold = tc;
1998                         }
1999                 } else if (unlikely(status == tx_hard_error)) {
2000                         stmmac_tx_err(priv, chan);
2001                 }
2002         }
2003 }
2004
2005 /**
2006  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2007  * @priv: driver private structure
2008  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2009  */
2010 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2011 {
2012         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2013                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2014
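             /* Cache the PTP and MMC register block base addresses; they sit
              * at different offsets on GMAC4 and on 3.x cores.
              */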
2015         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2016                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2017                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2018         } else {
2019                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2020                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2021         }
2022
2023         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2024
2025         if (priv->dma_cap.rmon) {
2026                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2027                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2028         } else
2029                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2030 }
2031
2032 /**
2033  * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
2034  * @priv: driver private structure
2035  * Description: select the Enhanced/Alternate or Normal descriptors.
2036  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2037  * supported by the HW capability register.
2038  */
2039 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2040 {
2041         if (priv->plat->enh_desc) {
2042                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2043
2044                 /* GMAC older than 3.50 has no extended descriptors */
2045                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2046                         dev_info(priv->device, "Enabled extended descriptors\n");
2047                         priv->extend_desc = 1;
2048                 } else
2049                         dev_warn(priv->device, "Extended descriptors not supported\n");
2050
2051                 priv->hw->desc = &enh_desc_ops;
2052         } else {
2053                 dev_info(priv->device, "Normal descriptors\n");
2054                 priv->hw->desc = &ndesc_ops;
2055         }
2056 }
2057
2058 /**
2059  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2060  * @priv: driver private structure
2061  * Description:
2062  *  new GMAC chip generations have a dedicated register to indicate the
2063  *  presence of the optional features/functions.
2064  *  This can also be used to override the values passed through the
2065  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2066  */
2067 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2068 {
2069         u32 ret = 0;
2070
2071         if (priv->hw->dma->get_hw_feature) {
2072                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2073                                               &priv->dma_cap);
2074                 ret = 1;
2075         }
2076
2077         return ret;
2078 }
2079
2080 /**
2081  * stmmac_check_ether_addr - check if the MAC addr is valid
2082  * @priv: driver private structure
2083  * Description:
2084  * it verifies whether the MAC address is valid; in case of failure it
2085  * generates a random MAC address
2086  */
2087 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2088 {
2089         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2090                 priv->hw->mac->get_umac_addr(priv->hw,
2091                                              priv->dev->dev_addr, 0);
2092                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2093                         eth_hw_addr_random(priv->dev);
2094                 netdev_info(priv->dev, "device MAC address %pM\n",
2095                             priv->dev->dev_addr);
2096         }
2097 }
2098
2099 /**
2100  * stmmac_init_dma_engine - DMA init.
2101  * @priv: driver private structure
2102  * Description:
2103  * It inits the DMA by invoking the specific MAC/GMAC callback.
2104  * Some DMA parameters can be passed from the platform;
2105  * if they are not passed, a default is kept for the MAC or GMAC.
2106  */
2107 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2108 {
2109         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2110         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2111         struct stmmac_rx_queue *rx_q;
2112         struct stmmac_tx_queue *tx_q;
2113         u32 dummy_dma_rx_phy = 0;
2114         u32 dummy_dma_tx_phy = 0;
2115         u32 chan = 0;
2116         int atds = 0;
2117         int ret = 0;
2118
2119         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2120                 dev_err(priv->device, "Invalid DMA configuration\n");
2121                 return -EINVAL;
2122         }
2123
2124         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2125                 atds = 1;
2126
2127         ret = priv->hw->dma->reset(priv->ioaddr);
2128         if (ret) {
2129                 dev_err(priv->device, "Failed to reset the dma\n");
2130                 return ret;
2131         }
2132
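             /* On DWMAC 4.00 and newer, each DMA channel is programmed
              * individually with its own descriptor base and tail pointer;
              * older cores take a single TX/RX descriptor base pair.
              */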
2133         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2134                 /* DMA Configuration */
2135                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2136                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2137
2138                 /* DMA RX Channel Configuration */
2139                 for (chan = 0; chan < rx_channels_count; chan++) {
2140                         rx_q = &priv->rx_queue[chan];
2141
2142                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2143                                                     priv->plat->dma_cfg,
2144                                                     rx_q->dma_rx_phy, chan);
2145
2146                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2147                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2148                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2149                                                        rx_q->rx_tail_addr,
2150                                                        chan);
2151                 }
2152
2153                 /* DMA TX Channel Configuration */
2154                 for (chan = 0; chan < tx_channels_count; chan++) {
2155                         tx_q = &priv->tx_queue[chan];
2156
2157                         priv->hw->dma->init_chan(priv->ioaddr,
2158                                                  priv->plat->dma_cfg,
2159                                                  chan);
2160
2161                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2162                                                     priv->plat->dma_cfg,
2163                                                     tx_q->dma_tx_phy, chan);
2164
2165                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2166                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2167                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2168                                                        tx_q->tx_tail_addr,
2169                                                        chan);
2170                 }
2171         } else {
2172                 rx_q = &priv->rx_queue[chan];
2173                 tx_q = &priv->tx_queue[chan];
2174                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2175                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2176         }
2177
2178         if (priv->plat->axi && priv->hw->dma->axi)
2179                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2180
2181         return ret;
2182 }
2183
2184 /**
2185  * stmmac_tx_timer - mitigation sw timer for tx.
2186  * @data: data pointer
2187  * Description:
2188  * This is the timer handler to directly invoke the stmmac_tx_clean.
2189  */
2190 static void stmmac_tx_timer(unsigned long data)
2191 {
2192         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2193         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2194         u32 queue;
2195
2196         /* let's scan all the tx queues */
2197         for (queue = 0; queue < tx_queues_count; queue++)
2198                 stmmac_tx_clean(priv, queue);
2199 }
2200
2201 /**
2202  * stmmac_init_tx_coalesce - init tx mitigation options.
2203  * @priv: driver private structure
2204  * Description:
2205  * This inits the transmit coalesce parameters: i.e. timer rate,
2206  * timer handler and default threshold used for enabling the
2207  * interrupt on completion bit.
2208  */
2209 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2210 {
2211         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2212         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2213         init_timer(&priv->txtimer);
2214         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2215         priv->txtimer.data = (unsigned long)priv;
2216         priv->txtimer.function = stmmac_tx_timer;
2217         add_timer(&priv->txtimer);
2218 }
2219
2220 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2221 {
2222         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2223         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2224         u32 chan;
2225
2226         /* set TX ring length */
2227         if (priv->hw->dma->set_tx_ring_len) {
2228                 for (chan = 0; chan < tx_channels_count; chan++)
2229                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2230                                                        (DMA_TX_SIZE - 1), chan);
2231         }
2232
2233         /* set RX ring length */
2234         if (priv->hw->dma->set_rx_ring_len) {
2235                 for (chan = 0; chan < rx_channels_count; chan++)
2236                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2237                                                        (DMA_RX_SIZE - 1), chan);
2238         }
2239 }
2240
2241 /**
2242  *  stmmac_set_tx_queue_weight - Set TX queue weight
2243  *  @priv: driver private structure
2244  *  Description: It is used for setting TX queues weight
2245  */
2246 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2247 {
2248         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2249         u32 weight;
2250         u32 queue;
2251
2252         for (queue = 0; queue < tx_queues_count; queue++) {
2253                 weight = priv->plat->tx_queues_cfg[queue].weight;
2254                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2255         }
2256 }
2257
2258 /**
2259  *  stmmac_configure_cbs - Configure CBS in TX queue
2260  *  @priv: driver private structure
2261  *  Description: It is used for configuring CBS in AVB TX queues
2262  */
2263 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2264 {
2265         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2266         u32 mode_to_use;
2267         u32 queue;
2268
2269         /* queue 0 is reserved for legacy traffic */
2270         for (queue = 1; queue < tx_queues_count; queue++) {
2271                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2272                 if (mode_to_use == MTL_QUEUE_DCB)
2273                         continue;
2274
2275                 priv->hw->mac->config_cbs(priv->hw,
2276                                 priv->plat->tx_queues_cfg[queue].send_slope,
2277                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2278                                 priv->plat->tx_queues_cfg[queue].high_credit,
2279                                 priv->plat->tx_queues_cfg[queue].low_credit,
2280                                 queue);
2281         }
2282 }
2283
2284 /**
2285  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2286  *  @priv: driver private structure
2287  *  Description: It is used for mapping RX queues to RX dma channels
2288  */
2289 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2290 {
2291         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2292         u32 queue;
2293         u32 chan;
2294
2295         for (queue = 0; queue < rx_queues_count; queue++) {
2296                 chan = priv->plat->rx_queues_cfg[queue].chan;
2297                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2298         }
2299 }
2300
2301 /**
2302  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2303  *  @priv: driver private structure
2304  *  Description: It is used for configuring the RX Queue Priority
2305  */
2306 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2307 {
2308         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2309         u32 queue;
2310         u32 prio;
2311
2312         for (queue = 0; queue < rx_queues_count; queue++) {
2313                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2314                         continue;
2315
2316                 prio = priv->plat->rx_queues_cfg[queue].prio;
2317                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2318         }
2319 }
2320
2321 /**
2322  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2323  *  @priv: driver private structure
2324  *  Description: It is used for configuring the TX Queue Priority
2325  */
2326 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2327 {
2328         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2329         u32 queue;
2330         u32 prio;
2331
2332         for (queue = 0; queue < tx_queues_count; queue++) {
2333                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2334                         continue;
2335
2336                 prio = priv->plat->tx_queues_cfg[queue].prio;
2337                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2338         }
2339 }
2340
2341 /**
2342  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2343  *  @priv: driver private structure
2344  *  Description: It is used for configuring the RX queue routing
2345  */
2346 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2347 {
2348         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2349         u32 queue;
2350         u8 packet;
2351
2352         for (queue = 0; queue < rx_queues_count; queue++) {
2353                 /* no specific packet type routing specified for the queue */
2354                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2355                         continue;
2356
2357                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2358                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2359         }
2360 }
2361
2362 /**
2363  *  stmmac_mtl_configuration - Configure MTL
2364  *  @priv: driver private structure
2365  *  Description: It is used for configuring MTL
2366  */
2367 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2368 {
2369         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2370         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2371
2372         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2373                 stmmac_set_tx_queue_weight(priv);
2374
2375         /* Configure MTL RX algorithms */
2376         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2377                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2378                                                 priv->plat->rx_sched_algorithm);
2379
2380         /* Configure MTL TX algorithms */
2381         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2382                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2383                                                 priv->plat->tx_sched_algorithm);
2384
2385         /* Configure CBS in AVB TX queues */
2386         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2387                 stmmac_configure_cbs(priv);
2388
2389         /* Map RX MTL to DMA channels */
2390         if (priv->hw->mac->map_mtl_to_dma)
2391                 stmmac_rx_queue_dma_chan_map(priv);
2392
2393         /* Enable MAC RX Queues */
2394         if (priv->hw->mac->rx_queue_enable)
2395                 stmmac_mac_enable_rx_queues(priv);
2396
2397         /* Set RX priorities */
2398         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2399                 stmmac_mac_config_rx_queues_prio(priv);
2400
2401         /* Set TX priorities */
2402         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2403                 stmmac_mac_config_tx_queues_prio(priv);
2404
2405         /* Set RX routing */
2406         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2407                 stmmac_mac_config_rx_queues_routing(priv);
2408 }
2409
2410 /**
2411  * stmmac_hw_setup - setup mac in a usable state.
2412  *  @dev : pointer to the device structure.
2413  *  Description:
2414  *  this is the main function to setup the HW in a usable state: the
2415  *  dma engine is reset and the core registers are configured (e.g. AXI,
2416  *  Checksum features, timers). The DMA is ready to start receiving and
2417  *  transmitting.
2418  *  Return value:
2419  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2420  *  file on failure.
2421  */
2422 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2423 {
2424         struct stmmac_priv *priv = netdev_priv(dev);
2425         u32 rx_cnt = priv->plat->rx_queues_to_use;
2426         u32 tx_cnt = priv->plat->tx_queues_to_use;
2427         u32 chan;
2428         int ret;
2429
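             /* Setup order: reset/init the DMA engine, program the MAC
              * address and core registers, configure MTL (GMAC4+), COE and
              * the DMA operation mode, MMC counters, optionally PTP, then
              * start the DMA, program ring lengths and enable TSO.
              */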
2430         /* DMA initialization and SW reset */
2431         ret = stmmac_init_dma_engine(priv);
2432         if (ret < 0) {
2433                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2434                            __func__);
2435                 return ret;
2436         }
2437
2438         /* Copy the MAC addr into the HW  */
2439         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2440
2441         /* PS and related bits will be programmed according to the speed */
2442         if (priv->hw->pcs) {
2443                 int speed = priv->plat->mac_port_sel_speed;
2444
2445                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2446                     (speed == SPEED_1000)) {
2447                         priv->hw->ps = speed;
2448                 } else {
2449                         dev_warn(priv->device, "invalid port speed\n");
2450                         priv->hw->ps = 0;
2451                 }
2452         }
2453
2454         /* Initialize the MAC Core */
2455         priv->hw->mac->core_init(priv->hw, dev->mtu);
2456
2457         /* Initialize MTL */
2458         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2459                 stmmac_mtl_configuration(priv);
2460
2461         ret = priv->hw->mac->rx_ipc(priv->hw);
2462         if (!ret) {
2463                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2464                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2465                 priv->hw->rx_csum = 0;
2466         }
2467
2468         /* Enable the MAC Rx/Tx */
2469         priv->hw->mac->set_mac(priv->ioaddr, true);
2470
2471         /* Set the HW DMA mode and the COE */
2472         stmmac_dma_operation_mode(priv);
2473
2474         stmmac_mmc_setup(priv);
2475
2476         if (init_ptp) {
2477                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2478                 if (ret < 0)
2479                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2480
2481                 ret = stmmac_init_ptp(priv);
2482                 if (ret == -EOPNOTSUPP)
2483                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2484                 else if (ret)
2485                         netdev_warn(priv->dev, "PTP init failed\n");
2486         }
2487
2488 #ifdef CONFIG_DEBUG_FS
2489         ret = stmmac_init_fs(dev);
2490         if (ret < 0)
2491                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2492                             __func__);
2493 #endif
2494         /* Start the ball rolling... */
2495         stmmac_start_all_dma(priv);
2496
2497         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2498
2499         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2500                 priv->rx_riwt = MAX_DMA_RIWT;
2501                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2502         }
2503
2504         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2505                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2506
2507         /* set TX and RX rings length */
2508         stmmac_set_rings_length(priv);
2509
2510         /* Enable TSO */
2511         if (priv->tso) {
2512                 for (chan = 0; chan < tx_cnt; chan++)
2513                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2514         }
2515
2516         return 0;
2517 }
2518
2519 static void stmmac_hw_teardown(struct net_device *dev)
2520 {
2521         struct stmmac_priv *priv = netdev_priv(dev);
2522
2523         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2524 }
2525
2526 /**
2527  *  stmmac_open - open entry point of the driver
2528  *  @dev : pointer to the device structure.
2529  *  Description:
2530  *  This function is the open entry point of the driver.
2531  *  Return value:
2532  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2533  *  file on failure.
2534  */
2535 static int stmmac_open(struct net_device *dev)
2536 {
2537         struct stmmac_priv *priv = netdev_priv(dev);
2538         int ret;
2539
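             /* Bring-up sequence: validate the MAC address, attach the PHY
              * (unless a PCS is used), allocate and initialize the DMA rings,
              * program the HW, arm the TX coalesce timer, request the IRQ
              * lines and finally enable/start all queues.
              */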
2540         stmmac_check_ether_addr(priv);
2541
2542         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2543             priv->hw->pcs != STMMAC_PCS_TBI &&
2544             priv->hw->pcs != STMMAC_PCS_RTBI) {
2545                 ret = stmmac_init_phy(dev);
2546                 if (ret) {
2547                         netdev_err(priv->dev,
2548                                    "%s: Cannot attach to PHY (error: %d)\n",
2549                                    __func__, ret);
2550                         return ret;
2551                 }
2552         }
2553
2554         /* Extra statistics */
2555         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2556         priv->xstats.threshold = tc;
2557
2558         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2559         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2560
2561         ret = alloc_dma_desc_resources(priv);
2562         if (ret < 0) {
2563                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2564                            __func__);
2565                 goto dma_desc_error;
2566         }
2567
2568         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2569         if (ret < 0) {
2570                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2571                            __func__);
2572                 goto init_error;
2573         }
2574
2575         ret = stmmac_hw_setup(dev, true);
2576         if (ret < 0) {
2577                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2578                 goto init_error;
2579         }
2580
2581         stmmac_init_tx_coalesce(priv);
2582
2583         if (dev->phydev)
2584                 phy_start(dev->phydev);
2585
2586         /* Request the IRQ lines */
2587         ret = request_irq(dev->irq, stmmac_interrupt,
2588                           IRQF_SHARED, dev->name, dev);
2589         if (unlikely(ret < 0)) {
2590                 netdev_err(priv->dev,
2591                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2592                            __func__, dev->irq, ret);
2593                 goto irq_error;
2594         }
2595
2596         /* Request the Wake IRQ in case another line is used for WoL */
2597         if (priv->wol_irq != dev->irq) {
2598                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2599                                   IRQF_SHARED, dev->name, dev);
2600                 if (unlikely(ret < 0)) {
2601                         netdev_err(priv->dev,
2602                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2603                                    __func__, priv->wol_irq, ret);
2604                         goto wolirq_error;
2605                 }
2606         }
2607
2608         /* Request the LPI IRQ in case a separate line is used for it */
2609         if (priv->lpi_irq > 0) {
2610                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2611                                   dev->name, dev);
2612                 if (unlikely(ret < 0)) {
2613                         netdev_err(priv->dev,
2614                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2615                                    __func__, priv->lpi_irq, ret);
2616                         goto lpiirq_error;
2617                 }
2618         }
2619
2620         stmmac_enable_all_queues(priv);
2621         stmmac_start_all_queues(priv);
2622
2623         return 0;
2624
2625 lpiirq_error:
2626         if (priv->wol_irq != dev->irq)
2627                 free_irq(priv->wol_irq, dev);
2628 wolirq_error:
2629         free_irq(dev->irq, dev);
2630 irq_error:
2631         if (dev->phydev)
2632                 phy_stop(dev->phydev);
2633
2634         del_timer_sync(&priv->txtimer);
2635         stmmac_hw_teardown(dev);
2636 init_error:
2637         free_dma_desc_resources(priv);
2638 dma_desc_error:
2639         if (dev->phydev)
2640                 phy_disconnect(dev->phydev);
2641
2642         return ret;
2643 }
2644
2645 /**
2646  *  stmmac_release - close entry point of the driver
2647  *  @dev : device pointer.
2648  *  Description:
2649  *  This is the stop entry point of the driver.
2650  */
2651 static int stmmac_release(struct net_device *dev)
2652 {
2653         struct stmmac_priv *priv = netdev_priv(dev);
2654
2655         if (priv->eee_enabled)
2656                 del_timer_sync(&priv->eee_ctrl_timer);
2657
2658         /* Stop and disconnect the PHY */
2659         if (dev->phydev) {
2660                 phy_stop(dev->phydev);
2661                 phy_disconnect(dev->phydev);
2662         }
2663
2664         stmmac_stop_all_queues(priv);
2665
2666         stmmac_disable_all_queues(priv);
2667
2668         del_timer_sync(&priv->txtimer);
2669
2670         /* Free the IRQ lines */
2671         free_irq(dev->irq, dev);
2672         if (priv->wol_irq != dev->irq)
2673                 free_irq(priv->wol_irq, dev);
2674         if (priv->lpi_irq > 0)
2675                 free_irq(priv->lpi_irq, dev);
2676
2677         /* Stop TX/RX DMA and clear the descriptors */
2678         stmmac_stop_all_dma(priv);
2679
2680         /* Release and free the Rx/Tx resources */
2681         free_dma_desc_resources(priv);
2682
2683         /* Disable the MAC Rx/Tx */
2684         priv->hw->mac->set_mac(priv->ioaddr, false);
2685
2686         netif_carrier_off(dev);
2687
2688 #ifdef CONFIG_DEBUG_FS
2689         stmmac_exit_fs(dev);
2690 #endif
2691
2692         stmmac_release_ptp(priv);
2693
2694         return 0;
2695 }
2696
2697 /**
2698  *  stmmac_tso_allocator - allocate and fill TSO descriptors
2699  *  @priv: driver private structure
2700  *  @des: buffer start address
2701  *  @total_len: total length to fill in descriptors
2702  *  @last_segment: condition for the last descriptor
2703  *  @queue: TX queue index
2704  *  Description:
2705  *  This function fills descriptors and requests new descriptors according to
2706  *  the buffer length to fill.
2707  */
2708 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2709                                  int total_len, bool last_segment, u32 queue)
2710 {
2711         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2712         struct dma_desc *desc;
2713         u32 buff_size;
2714         int tmp_len;
2715
2716         tmp_len = total_len;
2717
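             /* Split the remaining payload into chunks of at most
              * TSO_MAX_BUFF_SIZE bytes, one descriptor per chunk; the
              * descriptor that ends the last segment (a chunk shorter than
              * TSO_MAX_BUFF_SIZE) carries the last-segment flag.
              */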
2718         while (tmp_len > 0) {
2719                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2720                 desc = tx_q->dma_tx + tx_q->cur_tx;
2721
2722                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2723                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2724                             TSO_MAX_BUFF_SIZE : tmp_len;
2725
2726                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2727                         0, 1,
2728                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2729                         0, 0);
2730
2731                 tmp_len -= TSO_MAX_BUFF_SIZE;
2732         }
2733 }
2734
2735 /**
2736  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2737  *  @skb : the socket buffer
2738  *  @dev : device pointer
2739  *  Description: this is the transmit function that is called on TSO frames
2740  *  (support available on GMAC4 and newer chips).
2741  *  The diagram below shows the ring programming in case of TSO frames:
2742  *
2743  *  First Descriptor
2744  *   --------
2745  *   | DES0 |---> buffer1 = L2/L3/L4 header
2746  *   | DES1 |---> TCP Payload (can continue on next descr...)
2747  *   | DES2 |---> buffer 1 and 2 len
2748  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2749  *   --------
2750  *      |
2751  *     ...
2752  *      |
2753  *   --------
2754  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2755  *   | DES1 | --|
2756  *   | DES2 | --> buffer 1 and 2 len
2757  *   | DES3 |
2758  *   --------
2759  *
2760  * mss is fixed once TSO is enabled, so the TDES3 ctx field is only set when the MSS changes.
2761  */
2762 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2763 {
2764         struct dma_desc *desc, *first, *mss_desc = NULL;
2765         struct stmmac_priv *priv = netdev_priv(dev);
2766         int nfrags = skb_shinfo(skb)->nr_frags;
2767         u32 queue = skb_get_queue_mapping(skb);
2768         unsigned int first_entry, des;
2769         struct stmmac_tx_queue *tx_q;
2770         int tmp_pay_len = 0;
2771         u32 pay_len, mss;
2772         u8 proto_hdr_len;
2773         int i;
2774
2775         tx_q = &priv->tx_queue[queue];
2776
2777         /* Compute header lengths */
2778         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2779
2780         /* Desc availability based on the threshold should be safe enough */
2781         if (unlikely(stmmac_tx_avail(priv, queue) <
2782                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2783                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2784                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2785                                                                 queue));
2786                         /* This is a hard error, log it. */
2787                         netdev_err(priv->dev,
2788                                    "%s: Tx Ring full when queue awake\n",
2789                                    __func__);
2790                 }
2791                 return NETDEV_TX_BUSY;
2792         }
2793
2794         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2795
2796         mss = skb_shinfo(skb)->gso_size;
2797
2798         /* set new MSS value if needed */
2799         if (mss != priv->mss) {
2800                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2801                 priv->hw->desc->set_mss(mss_desc, mss);
2802                 priv->mss = mss;
2803                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2804         }
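	/*
	 * Note (editorial): the context descriptor above is written only when
	 * gso_size differs from the cached priv->mss, so the MSS goes into
	 * the ring once per change rather than once per frame; priv->mss
	 * exists purely to make that comparison cheap.
	 */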
2805
2806         if (netif_msg_tx_queued(priv)) {
2807                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2808                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2809                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2810                         skb->data_len);
2811         }
2812
2813         first_entry = tx_q->cur_tx;
2814
2815         desc = tx_q->dma_tx + first_entry;
2816         first = desc;
2817
2818         /* first descriptor: fill Headers on Buf1 */
2819         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2820                              DMA_TO_DEVICE);
2821         if (dma_mapping_error(priv->device, des))
2822                 goto dma_map_err;
2823
2824         tx_q->tx_skbuff_dma[first_entry].buf = des;
2825         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2826         tx_q->tx_skbuff[first_entry] = skb;
2827
2828         first->des0 = cpu_to_le32(des);
2829
2830         /* Fill start of payload in buff2 of first descriptor */
2831         if (pay_len)
2832                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2833
2834         /* If needed take extra descriptors to fill the remaining payload */
2835         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2836
2837         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2838
2839         /* Prepare fragments */
2840         for (i = 0; i < nfrags; i++) {
2841                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2842
2843                 des = skb_frag_dma_map(priv->device, frag, 0,
2844                                        skb_frag_size(frag),
2845                                        DMA_TO_DEVICE);
2846                 if (dma_mapping_error(priv->device, des))
2847                         goto dma_map_err;
2848
2849                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2850                                      (i == nfrags - 1), queue);
2851
2852                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2853                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2854                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2855                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2856         }
2857
2858         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2859
2860         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2861
2862         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2863                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2864                           __func__);
2865                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2866         }
2867
2868         dev->stats.tx_bytes += skb->len;
2869         priv->xstats.tx_tso_frames++;
2870         priv->xstats.tx_tso_nfrags += nfrags;
2871
2872         /* Manage tx mitigation */
2873         priv->tx_count_frames += nfrags + 1;
2874         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2875                 mod_timer(&priv->txtimer,
2876                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2877         } else {
2878                 priv->tx_count_frames = 0;
2879                 priv->hw->desc->set_tx_ic(desc);
2880                 priv->xstats.tx_set_ic_bit++;
2881         }
2882
2883         skb_tx_timestamp(skb);
2884
2885         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2886                      priv->hwts_tx_en)) {
2887                 /* declare that device is doing timestamping */
2888                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2889                 priv->hw->desc->enable_tx_timestamp(first);
2890         }
2891
2892         /* Complete the first descriptor before granting the DMA */
2893         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2894                         proto_hdr_len,
2895                         pay_len,
2896                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2897                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2898
2899         /* If context desc is used to change MSS */
2900         if (mss_desc)
2901                 priv->hw->desc->set_tx_owner(mss_desc);
2902
2903         /* The own bit must be the last setting done when preparing the
2904          * descriptor; a barrier is then needed to make sure that
2905          * everything is coherent before granting control to the DMA engine.
2906          */
2907         dma_wmb();
2908
2909         if (netif_msg_pktdata(priv)) {
2910                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2911                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2912                         tx_q->cur_tx, first, nfrags);
2913
2914                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2915                                              0);
2916
2917                 pr_info(">>> frame to be transmitted: ");
2918                 print_pkt(skb->data, skb_headlen(skb));
2919         }
2920
2921         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2922
2923         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2924                                        queue);
2925
2926         return NETDEV_TX_OK;
2927
2928 dma_map_err:
2929         dev_err(priv->device, "Tx dma map failed\n");
2930         dev_kfree_skb(skb);
2931         priv->dev->stats.tx_dropped++;
2932         return NETDEV_TX_OK;
2933 }
2934
2935 /**
2936  *  stmmac_xmit - Tx entry point of the driver
2937  *  @skb : the socket buffer
2938  *  @dev : device pointer
2939  *  Description : this is the tx entry point of the driver.
2940  *  It programs the chain or the ring and supports oversized frames
2941  *  and SG feature.
2942  */
2943 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2944 {
2945         struct stmmac_priv *priv = netdev_priv(dev);
2946         unsigned int nopaged_len = skb_headlen(skb);
2947         int i, csum_insertion = 0, is_jumbo = 0;
2948         u32 queue = skb_get_queue_mapping(skb);
2949         int nfrags = skb_shinfo(skb)->nr_frags;
2950         unsigned int entry, first_entry;
2951         struct dma_desc *desc, *first;
2952         struct stmmac_tx_queue *tx_q;
2953         unsigned int enh_desc;
2954         unsigned int des;
2955
2956         tx_q = &priv->tx_queue[queue];
2957
2958         /* Manage oversized TCP frames for GMAC4 device */
2959         if (skb_is_gso(skb) && priv->tso) {
2960                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2961                         return stmmac_tso_xmit(skb, dev);
2962         }
2963
2964         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2965                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2966                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2967                                                                 queue));
2968                         /* This is a hard error, log it. */
2969                         netdev_err(priv->dev,
2970                                    "%s: Tx Ring full when queue awake\n",
2971                                    __func__);
2972                 }
2973                 return NETDEV_TX_BUSY;
2974         }
2975
2976         if (priv->tx_path_in_lpi_mode)
2977                 stmmac_disable_eee_mode(priv);
2978
2979         entry = tx_q->cur_tx;
2980         first_entry = entry;
2981
2982         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2983
2984         if (likely(priv->extend_desc))
2985                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2986         else
2987                 desc = tx_q->dma_tx + entry;
2988
2989         first = desc;
2990
2991         tx_q->tx_skbuff[first_entry] = skb;
2992
2993         enh_desc = priv->plat->enh_desc;
2994         /* To program the descriptors according to the size of the frame */
2995         if (enh_desc)
2996                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2997
2998         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2999                                          DWMAC_CORE_4_00)) {
3000                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3001                 if (unlikely(entry < 0))
3002                         goto dma_map_err;
3003         }
3004
3005         for (i = 0; i < nfrags; i++) {
3006                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3007                 int len = skb_frag_size(frag);
3008                 bool last_segment = (i == (nfrags - 1));
3009
3010                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3011
3012                 if (likely(priv->extend_desc))
3013                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3014                 else
3015                         desc = tx_q->dma_tx + entry;
3016
3017                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3018                                        DMA_TO_DEVICE);
3019                 if (dma_mapping_error(priv->device, des))
3020                         goto dma_map_err; /* should reuse desc w/o issues */
3021
3022                 tx_q->tx_skbuff[entry] = NULL;
3023
3024                 tx_q->tx_skbuff_dma[entry].buf = des;
3025                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3026                         desc->des0 = cpu_to_le32(des);
3027                 else
3028                         desc->des2 = cpu_to_le32(des);
3029
3030                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3031                 tx_q->tx_skbuff_dma[entry].len = len;
3032                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3033
3034                 /* Prepare the descriptor and set the own bit too */
3035                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3036                                                 priv->mode, 1, last_segment,
3037                                                 skb->len);
3038         }
3039
3040         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3041
3042         tx_q->cur_tx = entry;
3043
3044         if (netif_msg_pktdata(priv)) {
3045                 void *tx_head;
3046
3047                 netdev_dbg(priv->dev,
3048                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3049                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3050                            entry, first, nfrags);
3051
3052                 if (priv->extend_desc)
3053                         tx_head = (void *)tx_q->dma_etx;
3054                 else
3055                         tx_head = (void *)tx_q->dma_tx;
3056
3057                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3058
3059                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3060                 print_pkt(skb->data, skb->len);
3061         }
3062
3063         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3064                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3065                           __func__);
3066                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3067         }
3068
3069         dev->stats.tx_bytes += skb->len;
3070
3071         /* According to the coalesce parameter the IC bit for the latest
3072          * segment is reset and the timer re-started to clean the tx status.
3073          * This approach takes care of the fragments: desc is the first
3074          * element in case of no SG.
3075          */
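	/* Worked example (editorial, illustrative value only): if
	 * tx_coal_frames were 25, the IC bit below would be set roughly once
	 * every 25 queued frames; for the frames in between, the tx timer
	 * re-armed in the first branch is what eventually triggers the
	 * clean-up of completed descriptors.
	 */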
3076         priv->tx_count_frames += nfrags + 1;
3077         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3078                 mod_timer(&priv->txtimer,
3079                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3080         } else {
3081                 priv->tx_count_frames = 0;
3082                 priv->hw->desc->set_tx_ic(desc);
3083                 priv->xstats.tx_set_ic_bit++;
3084         }
3085
3086         skb_tx_timestamp(skb);
3087
3088         /* Ready to fill the first descriptor and set the OWN bit w/o any
3089          * problems because all the descriptors are actually ready to be
3090          * passed to the DMA engine.
3091          */
3092         if (likely(!is_jumbo)) {
3093                 bool last_segment = (nfrags == 0);
3094
3095                 des = dma_map_single(priv->device, skb->data,
3096                                      nopaged_len, DMA_TO_DEVICE);
3097                 if (dma_mapping_error(priv->device, des))
3098                         goto dma_map_err;
3099
3100                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3101                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3102                         first->des0 = cpu_to_le32(des);
3103                 else
3104                         first->des2 = cpu_to_le32(des);
3105
3106                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3107                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3108
3109                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3110                              priv->hwts_tx_en)) {
3111                         /* declare that device is doing timestamping */
3112                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3113                         priv->hw->desc->enable_tx_timestamp(first);
3114                 }
3115
3116                 /* Prepare the first descriptor setting the OWN bit too */
3117                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3118                                                 csum_insertion, priv->mode, 1,
3119                                                 last_segment, skb->len);
3120
3121                 /* The own bit must be the last setting done when preparing the
3122                  * descriptor; a barrier is then needed to make sure that
3123                  * everything is coherent before granting control to the DMA engine.
3124                  */
3125                 dma_wmb();
3126         }
3127
3128         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3129
3130         if (priv->synopsys_id < DWMAC_CORE_4_00)
3131                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3132         else
3133                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3134                                                queue);
3135
3136         return NETDEV_TX_OK;
3137
3138 dma_map_err:
3139         netdev_err(priv->dev, "Tx DMA map failed\n");
3140         dev_kfree_skb(skb);
3141         priv->dev->stats.tx_dropped++;
3142         return NETDEV_TX_OK;
3143 }
3144
3145 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3146 {
3147         struct ethhdr *ehdr;
3148         u16 vlanid;
3149
3150         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3151             NETIF_F_HW_VLAN_CTAG_RX &&
3152             !__vlan_get_tag(skb, &vlanid)) {
3153                 /* pop the vlan tag */
3154                 ehdr = (struct ethhdr *)skb->data;
3155                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3156                 skb_pull(skb, VLAN_HLEN);
3157                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3158         }
3159 }
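
/*
 * Editorial sketch of what stmmac_rx_vlan() above does to the frame,
 * assuming a single 802.1Q tag:
 *
 *   before: | dst MAC | src MAC | TPID | TCI | EtherType | payload ...
 *   after:  | dst MAC | src MAC | EtherType | payload ...
 *
 * The memmove() shifts the two MAC addresses up by VLAN_HLEN (4 bytes),
 * skb_pull() drops the now stale first 4 bytes, and the VLAN ID extracted
 * by __vlan_get_tag() is handed to the stack via __vlan_hwaccel_put_tag().
 */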
3160
3161
3162 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3163 {
3164         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3165                 return 0;
3166
3167         return 1;
3168 }
3169
3170 /**
3171  * stmmac_rx_refill - refill used skb preallocated buffers
3172  * @priv: driver private structure
3173  * @queue: RX queue index
3174  * Description : this is to reallocate the skb for the reception process
3175  * that is based on zero-copy.
3176  */
3177 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3178 {
3179         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3180         int dirty = stmmac_rx_dirty(priv, queue);
3181         unsigned int entry = rx_q->dirty_rx;
3182
3183         int bfsize = priv->dma_buf_sz;
3184
3185         while (dirty-- > 0) {
3186                 struct dma_desc *p;
3187
3188                 if (priv->extend_desc)
3189                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3190                 else
3191                         p = rx_q->dma_rx + entry;
3192
3193                 if (likely(!rx_q->rx_skbuff[entry])) {
3194                         struct sk_buff *skb;
3195
3196                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3197                         if (unlikely(!skb)) {
3198                                 /* so for a while no zero-copy! */
3199                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3200                                 if (unlikely(net_ratelimit()))
3201                                         dev_err(priv->device,
3202                                                 "fail to alloc skb entry %d\n",
3203                                                 entry);
3204                                 break;
3205                         }
3206
3207                         rx_q->rx_skbuff[entry] = skb;
3208                         rx_q->rx_skbuff_dma[entry] =
3209                             dma_map_single(priv->device, skb->data, bfsize,
3210                                            DMA_FROM_DEVICE);
3211                         if (dma_mapping_error(priv->device,
3212                                               rx_q->rx_skbuff_dma[entry])) {
3213                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3214                                 dev_kfree_skb(skb);
3215                                 break;
3216                         }
3217
3218                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3219                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3220                                 p->des1 = 0;
3221                         } else {
3222                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3223                         }
3224                         if (priv->hw->mode->refill_desc3)
3225                                 priv->hw->mode->refill_desc3(rx_q, p);
3226
3227                         if (rx_q->rx_zeroc_thresh > 0)
3228                                 rx_q->rx_zeroc_thresh--;
3229
3230                         netif_dbg(priv, rx_status, priv->dev,
3231                                   "refill entry #%d\n", entry);
3232                 }
3233                 dma_wmb();
3234
3235                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3236                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3237                 else
3238                         priv->hw->desc->set_rx_owner(p);
3239
3240                 dma_wmb();
3241
3242                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3243         }
3244         rx_q->dirty_rx = entry;
3245 }
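
/*
 * Editorial note on the zero-copy throttle: when netdev_alloc_skb_ip_align()
 * fails above, rx_zeroc_thresh is set to STMMAC_RX_THRESH, which makes
 * stmmac_rx_threshold_count() return 1 so that stmmac_rx() temporarily takes
 * the copybreak (copying) path; the counter is then decremented here for
 * every replacement skb that is successfully allocated, until zero-copy
 * reception resumes.
 */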
3246
3247 /**
3248  * stmmac_rx - manage the receive process
3249  * @priv: driver private structure
3250  * @limit: napi budget
3251  * @queue: RX queue index.
3252  * Description: this is the function called by the napi poll method.
3253  * It gets all the frames inside the ring.
3254  */
3255 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3256 {
3257         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3258         unsigned int entry = rx_q->cur_rx;
3259         int coe = priv->hw->rx_csum;
3260         unsigned int next_entry;
3261         unsigned int count = 0;
3262
3263         if (netif_msg_rx_status(priv)) {
3264                 void *rx_head;
3265
3266                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3267                 if (priv->extend_desc)
3268                         rx_head = (void *)rx_q->dma_erx;
3269                 else
3270                         rx_head = (void *)rx_q->dma_rx;
3271
3272                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3273         }
3274         while (count < limit) {
3275                 int status;
3276                 struct dma_desc *p;
3277                 struct dma_desc *np;
3278
3279                 if (priv->extend_desc)
3280                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3281                 else
3282                         p = rx_q->dma_rx + entry;
3283
3284                 /* read the status of the incoming frame */
3285                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3286                                                    &priv->xstats, p);
3287                 /* check if managed by the DMA otherwise go ahead */
3288                 if (unlikely(status & dma_own))
3289                         break;
3290
3291                 count++;
3292
3293                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3294                 next_entry = rx_q->cur_rx;
3295
3296                 if (priv->extend_desc)
3297                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3298                 else
3299                         np = rx_q->dma_rx + next_entry;
3300
3301                 prefetch(np);
3302
3303                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3304                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3305                                                            &priv->xstats,
3306                                                            rx_q->dma_erx +
3307                                                            entry);
3308                 if (unlikely(status == discard_frame)) {
3309                         priv->dev->stats.rx_errors++;
3310                         if (priv->hwts_rx_en && !priv->extend_desc) {
3311                                 /* DESC2 & DESC3 will be overwritten by device
3312                                  * with timestamp value, hence reinitialize
3313                                  * them in stmmac_rx_refill() function so that
3314                                  * device can reuse it.
3315                                  */
3316                                 rx_q->rx_skbuff[entry] = NULL;
3317                                 dma_unmap_single(priv->device,
3318                                                  rx_q->rx_skbuff_dma[entry],
3319                                                  priv->dma_buf_sz,
3320                                                  DMA_FROM_DEVICE);
3321                         }
3322                 } else {
3323                         struct sk_buff *skb;
3324                         int frame_len;
3325                         unsigned int des;
3326
3327                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3328                                 des = le32_to_cpu(p->des0);
3329                         else
3330                                 des = le32_to_cpu(p->des2);
3331
3332                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3333
3334                         /*  If frame length is greater than skb buffer size
3335                          *  (preallocated during init) then the packet is
3336                          *  ignored
3337                          */
3338                         if (frame_len > priv->dma_buf_sz) {
3339                                 netdev_err(priv->dev,
3340                                            "len %d larger than size (%d)\n",
3341                                            frame_len, priv->dma_buf_sz);
3342                                 priv->dev->stats.rx_length_errors++;
3343                                 break;
3344                         }
3345
3346                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3347                          * Type frames (LLC/LLC-SNAP)
3348                          */
3349                         if (unlikely(status != llc_snap))
3350                                 frame_len -= ETH_FCS_LEN;
3351
3352                         if (netif_msg_rx_status(priv)) {
3353                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3354                                            p, entry, des);
3355                                 if (frame_len > ETH_FRAME_LEN)
3356                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3357                                                    frame_len, status);
3358                         }
3359
3360                         /* Zero-copy is always used for all frame sizes
3361                          * in the case of GMAC4 because the used
3362                          * descriptors always need to be refilled.
3363                          */
3364                         if (unlikely(!priv->plat->has_gmac4 &&
3365                                      ((frame_len < priv->rx_copybreak) ||
3366                                      stmmac_rx_threshold_count(rx_q)))) {
3367                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3368                                                                 frame_len);
3369                                 if (unlikely(!skb)) {
3370                                         if (net_ratelimit())
3371                                                 dev_warn(priv->device,
3372                                                          "packet dropped\n");
3373                                         priv->dev->stats.rx_dropped++;
3374                                         break;
3375                                 }
3376
3377                                 dma_sync_single_for_cpu(priv->device,
3378                                                         rx_q->rx_skbuff_dma
3379                                                         [entry], frame_len,
3380                                                         DMA_FROM_DEVICE);
3381                                 skb_copy_to_linear_data(skb,
3382                                                         rx_q->
3383                                                         rx_skbuff[entry]->data,
3384                                                         frame_len);
3385
3386                                 skb_put(skb, frame_len);
3387                                 dma_sync_single_for_device(priv->device,
3388                                                            rx_q->rx_skbuff_dma
3389                                                            [entry], frame_len,
3390                                                            DMA_FROM_DEVICE);
3391                         } else {
3392                                 skb = rx_q->rx_skbuff[entry];
3393                                 if (unlikely(!skb)) {
3394                                         netdev_err(priv->dev,
3395                                                    "%s: Inconsistent Rx chain\n",
3396                                                    priv->dev->name);
3397                                         priv->dev->stats.rx_dropped++;
3398                                         break;
3399                                 }
3400                                 prefetch(skb->data - NET_IP_ALIGN);
3401                                 rx_q->rx_skbuff[entry] = NULL;
3402                                 rx_q->rx_zeroc_thresh++;
3403
3404                                 skb_put(skb, frame_len);
3405                                 dma_unmap_single(priv->device,
3406                                                  rx_q->rx_skbuff_dma[entry],
3407                                                  priv->dma_buf_sz,
3408                                                  DMA_FROM_DEVICE);
3409                         }
3410
3411                         if (netif_msg_pktdata(priv)) {
3412                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3413                                            frame_len);
3414                                 print_pkt(skb->data, frame_len);
3415                         }
3416
3417                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3418
3419                         stmmac_rx_vlan(priv->dev, skb);
3420
3421                         skb->protocol = eth_type_trans(skb, priv->dev);
3422
3423                         if (unlikely(!coe))
3424                                 skb_checksum_none_assert(skb);
3425                         else
3426                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3427
3428                         napi_gro_receive(&rx_q->napi, skb);
3429
3430                         priv->dev->stats.rx_packets++;
3431                         priv->dev->stats.rx_bytes += frame_len;
3432                 }
3433                 entry = next_entry;
3434         }
3435
3436         stmmac_rx_refill(priv, queue);
3437
3438         priv->xstats.rx_pkt_n += count;
3439
3440         return count;
3441 }
3442
3443 /**
3444  *  stmmac_poll - stmmac poll method (NAPI)
3445  *  @napi : pointer to the napi structure.
3446  *  @budget : maximum number of packets that the current CPU can receive from
3447  *            all interfaces.
3448  *  Description :
3449  *  To look at the incoming frames and clear the tx resources.
3450  */
3451 static int stmmac_poll(struct napi_struct *napi, int budget)
3452 {
3453         struct stmmac_rx_queue *rx_q =
3454                 container_of(napi, struct stmmac_rx_queue, napi);
3455         struct stmmac_priv *priv = rx_q->priv_data;
3456         u32 tx_count = priv->plat->tx_queues_to_use;
3457         u32 chan = rx_q->queue_index;
3458         int work_done = 0;
3459         u32 queue;
3460
3461         priv->xstats.napi_poll++;
3462
3463         /* check all the queues */
3464         for (queue = 0; queue < tx_count; queue++)
3465                 stmmac_tx_clean(priv, queue);
3466
3467         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3468         if (work_done < budget) {
3469                 napi_complete_done(napi, work_done);
3470                 stmmac_enable_dma_irq(priv, chan);
3471         }
3472         return work_done;
3473 }
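
/*
 * Editorial note: this poll handler is registered per RX queue, yet it also
 * walks every TX queue through stmmac_tx_clean() above. When fewer packets
 * than the budget were processed, napi_complete_done() is called and the DMA
 * channel interrupt is re-enabled; otherwise NAPI keeps the handler scheduled
 * and the interrupt stays masked.
 */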
3474
3475 /**
3476  *  stmmac_tx_timeout
3477  *  @dev : Pointer to net device structure
3478  *  Description: this function is called when a packet transmission fails to
3479  *   complete within a reasonable time. The driver will mark the error in the
3480  *   netdev structure and arrange for the device to be reset to a sane state
3481  *   in order to transmit a new packet.
3482  */
3483 static void stmmac_tx_timeout(struct net_device *dev)
3484 {
3485         struct stmmac_priv *priv = netdev_priv(dev);
3486         u32 tx_count = priv->plat->tx_queues_to_use;
3487         u32 chan;
3488
3489         /* Clear Tx resources and restart transmitting again */
3490         for (chan = 0; chan < tx_count; chan++)
3491                 stmmac_tx_err(priv, chan);
3492 }
3493
3494 /**
3495  *  stmmac_set_rx_mode - entry point for multicast addressing
3496  *  @dev : pointer to the device structure
3497  *  Description:
3498  *  This function is a driver entry point which gets called by the kernel
3499  *  whenever multicast addresses must be enabled/disabled.
3500  *  Return value:
3501  *  void.
3502  */
3503 static void stmmac_set_rx_mode(struct net_device *dev)
3504 {
3505         struct stmmac_priv *priv = netdev_priv(dev);
3506
3507         priv->hw->mac->set_filter(priv->hw, dev);
3508 }
3509
3510 /**
3511  *  stmmac_change_mtu - entry point to change MTU size for the device.
3512  *  @dev : device pointer.
3513  *  @new_mtu : the new MTU size for the device.
3514  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3515  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3516  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3517  *  Return value:
3518  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3519  *  file on failure.
3520  */
3521 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3522 {
3523         struct stmmac_priv *priv = netdev_priv(dev);
3524
3525         if (netif_running(dev)) {
3526                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3527                 return -EBUSY;
3528         }
3529
3530         dev->mtu = new_mtu;
3531
3532         netdev_update_features(dev);
3533
3534         return 0;
3535 }
3536
3537 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3538                                              netdev_features_t features)
3539 {
3540         struct stmmac_priv *priv = netdev_priv(dev);
3541
3542         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3543                 features &= ~NETIF_F_RXCSUM;
3544
3545         if (!priv->plat->tx_coe)
3546                 features &= ~NETIF_F_CSUM_MASK;
3547
3548         /* Some GMAC devices have bugged Jumbo frame support that
3549          * requires the Tx COE to be disabled for oversized frames
3550          * (due to limited buffer sizes). In this case we disable
3551          * the TX csum insertion in the TDES and don't use SF.
3552          */
3553         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3554                 features &= ~NETIF_F_CSUM_MASK;
3555
3556         /* Disable tso if asked by ethtool */
3557         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3558                 if (features & NETIF_F_TSO)
3559                         priv->tso = true;
3560                 else
3561                         priv->tso = false;
3562         }
3563
3564         return features;
3565 }
3566
3567 static int stmmac_set_features(struct net_device *netdev,
3568                                netdev_features_t features)
3569 {
3570         struct stmmac_priv *priv = netdev_priv(netdev);
3571
3572         /* Keep the COE Type if checksum offload is supported */
3573         if (features & NETIF_F_RXCSUM)
3574                 priv->hw->rx_csum = priv->plat->rx_coe;
3575         else
3576                 priv->hw->rx_csum = 0;
3577         /* No check needed because rx_coe has been set before and it will be
3578          * fixed in case of issue.
3579          */
3580         priv->hw->mac->rx_ipc(priv->hw);
3581
3582         return 0;
3583 }
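
/*
 * Editorial usage note (standard ethtool offload interface, interface name
 * only an example): toggling RX checksum offload from user space with
 *
 *   ethtool -K eth0 rx off
 *
 * lands in stmmac_set_features() above, which clears priv->hw->rx_csum and
 * reprograms the core through the rx_ipc() callback.
 */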
3584
3585 /**
3586  *  stmmac_interrupt - main ISR
3587  *  @irq: interrupt number.
3588  *  @dev_id: to pass the net device pointer.
3589  *  Description: this is the main driver interrupt service routine.
3590  *  It can call:
3591  *  o DMA service routine (to manage incoming frame reception and transmission
3592  *    status)
3593  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3594  *    interrupts.
3595  */
3596 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3597 {
3598         struct net_device *dev = (struct net_device *)dev_id;
3599         struct stmmac_priv *priv = netdev_priv(dev);
3600         u32 rx_cnt = priv->plat->rx_queues_to_use;
3601         u32 tx_cnt = priv->plat->tx_queues_to_use;
3602         u32 queues_count;
3603         u32 queue;
3604
3605         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3606
3607         if (priv->irq_wake)
3608                 pm_wakeup_event(priv->device, 0);
3609
3610         if (unlikely(!dev)) {
3611                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3612                 return IRQ_NONE;
3613         }
3614
3615         /* To handle GMAC own interrupts */
3616         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3617                 int status = priv->hw->mac->host_irq_status(priv->hw,
3618                                                             &priv->xstats);
3619
3620                 if (unlikely(status)) {
3621                         /* For LPI we need to save the tx status */
3622                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3623                                 priv->tx_path_in_lpi_mode = true;
3624                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3625                                 priv->tx_path_in_lpi_mode = false;
3626                 }
3627
3628                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3629                         for (queue = 0; queue < queues_count; queue++) {
3630                                 struct stmmac_rx_queue *rx_q =
3631                                 &priv->rx_queue[queue];
3632
3633                                 status |=
3634                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3635                                                                    queue);
3636
3637                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3638                                     priv->hw->dma->set_rx_tail_ptr)
3639                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3640                                                                 rx_q->rx_tail_addr,
3641                                                                 queue);
3642                         }
3643                 }
3644
3645                 /* PCS link status */
3646                 if (priv->hw->pcs) {
3647                         if (priv->xstats.pcs_link)
3648                                 netif_carrier_on(dev);
3649                         else
3650                                 netif_carrier_off(dev);
3651                 }
3652         }
3653
3654         /* To handle DMA interrupts */
3655         stmmac_dma_interrupt(priv);
3656
3657         return IRQ_HANDLED;
3658 }
3659
3660 #ifdef CONFIG_NET_POLL_CONTROLLER
3661 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3662  * to allow network I/O with interrupts disabled.
3663  */
3664 static void stmmac_poll_controller(struct net_device *dev)
3665 {
3666         disable_irq(dev->irq);
3667         stmmac_interrupt(dev->irq, dev);
3668         enable_irq(dev->irq);
3669 }
3670 #endif
3671
3672 /**
3673  *  stmmac_ioctl - Entry point for the Ioctl
3674  *  @dev: Device pointer.
3675  *  @rq: An IOCTL-specific structure that can contain a pointer to
3676  *  a proprietary structure used to pass information to the driver.
3677  *  @cmd: IOCTL command
3678  *  Description:
3679  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3680  */
3681 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3682 {
3683         int ret = -EOPNOTSUPP;
3684
3685         if (!netif_running(dev))
3686                 return -EINVAL;
3687
3688         switch (cmd) {
3689         case SIOCGMIIPHY:
3690         case SIOCGMIIREG:
3691         case SIOCSMIIREG:
3692                 if (!dev->phydev)
3693                         return -EINVAL;
3694                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3695                 break;
3696         case SIOCSHWTSTAMP:
3697                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3698                 break;
3699         default:
3700                 break;
3701         }
3702
3703         return ret;
3704 }
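
/*
 * Editorial usage sketch (user-space side, not part of the driver; the
 * interface name and filter choice are only examples): the SIOCSHWTSTAMP
 * case above is typically reached through an ioctl such as
 *
 *   struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
 *                                  .rx_filter = HWTSTAMP_FILTER_ALL };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *
 * which is then handled by stmmac_hwtstamp_ioctl().
 */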
3705
3706 #ifdef CONFIG_DEBUG_FS
3707 static struct dentry *stmmac_fs_dir;
3708
3709 static void sysfs_display_ring(void *head, int size, int extend_desc,
3710                                struct seq_file *seq)
3711 {
3712         int i;
3713         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3714         struct dma_desc *p = (struct dma_desc *)head;
3715
3716         for (i = 0; i < size; i++) {
3717                 if (extend_desc) {
3718                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3719                                    i, (unsigned int)virt_to_phys(ep),
3720                                    le32_to_cpu(ep->basic.des0),
3721                                    le32_to_cpu(ep->basic.des1),
3722                                    le32_to_cpu(ep->basic.des2),
3723                                    le32_to_cpu(ep->basic.des3));
3724                         ep++;
3725                 } else {
3726                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3727                                    i, (unsigned int)virt_to_phys(p),
3728                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3729                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3730                         p++;
3731                 }
3732                 seq_printf(seq, "\n");
3733         }
3734 }
3735
3736 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3737 {
3738         struct net_device *dev = seq->private;
3739         struct stmmac_priv *priv = netdev_priv(dev);
3740         u32 rx_count = priv->plat->rx_queues_to_use;
3741         u32 tx_count = priv->plat->tx_queues_to_use;
3742         u32 queue;
3743
3744         for (queue = 0; queue < rx_count; queue++) {
3745                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3746
3747                 seq_printf(seq, "RX Queue %d:\n", queue);
3748
3749                 if (priv->extend_desc) {
3750                         seq_printf(seq, "Extended descriptor ring:\n");
3751                         sysfs_display_ring((void *)rx_q->dma_erx,
3752                                            DMA_RX_SIZE, 1, seq);
3753                 } else {
3754                         seq_printf(seq, "Descriptor ring:\n");
3755                         sysfs_display_ring((void *)rx_q->dma_rx,
3756                                            DMA_RX_SIZE, 0, seq);
3757                 }
3758         }
3759
3760         for (queue = 0; queue < tx_count; queue++) {
3761                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3762
3763                 seq_printf(seq, "TX Queue %d:\n", queue);
3764
3765                 if (priv->extend_desc) {
3766                         seq_printf(seq, "Extended descriptor ring:\n");
3767                         sysfs_display_ring((void *)tx_q->dma_etx,
3768                                            DMA_TX_SIZE, 1, seq);
3769                 } else {
3770                         seq_printf(seq, "Descriptor ring:\n");
3771                         sysfs_display_ring((void *)tx_q->dma_tx,
3772                                            DMA_TX_SIZE, 0, seq);
3773                 }
3774         }
3775
3776         return 0;
3777 }
3778
3779 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3780 {
3781         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3782 }
3783
3784 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3785
3786 static const struct file_operations stmmac_rings_status_fops = {
3787         .owner = THIS_MODULE,
3788         .open = stmmac_sysfs_ring_open,
3789         .read = seq_read,
3790         .llseek = seq_lseek,
3791         .release = single_release,
3792 };
3793
3794 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3795 {
3796         struct net_device *dev = seq->private;
3797         struct stmmac_priv *priv = netdev_priv(dev);
3798
3799         if (!priv->hw_cap_support) {
3800                 seq_printf(seq, "DMA HW features not supported\n");
3801                 return 0;
3802         }
3803
3804         seq_printf(seq, "==============================\n");
3805         seq_printf(seq, "\tDMA HW features\n");
3806         seq_printf(seq, "==============================\n");
3807
3808         seq_printf(seq, "\t10/100 Mbps: %s\n",
3809                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3810         seq_printf(seq, "\t1000 Mbps: %s\n",
3811                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3812         seq_printf(seq, "\tHalf duplex: %s\n",
3813                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3814         seq_printf(seq, "\tHash Filter: %s\n",
3815                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3816         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3817                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3818         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3819                    (priv->dma_cap.pcs) ? "Y" : "N");
3820         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3821                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3822         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3823                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3824         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3825                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3826         seq_printf(seq, "\tRMON module: %s\n",
3827                    (priv->dma_cap.rmon) ? "Y" : "N");
3828         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3829                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3830         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3831                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3832         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3833                    (priv->dma_cap.eee) ? "Y" : "N");
3834         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3835         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3836                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3837         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3838                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3839                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3840         } else {
3841                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3842                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3843                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3844                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3845         }
3846         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3847                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3848         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3849                    priv->dma_cap.number_rx_channel);
3850         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3851                    priv->dma_cap.number_tx_channel);
3852         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3853                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3854
3855         return 0;
3856 }
3857
3858 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3859 {
3860         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3861 }
3862
3863 static const struct file_operations stmmac_dma_cap_fops = {
3864         .owner = THIS_MODULE,
3865         .open = stmmac_sysfs_dma_cap_open,
3866         .read = seq_read,
3867         .llseek = seq_lseek,
3868         .release = single_release,
3869 };
3870
3871 static int stmmac_init_fs(struct net_device *dev)
3872 {
3873         struct stmmac_priv *priv = netdev_priv(dev);
3874
3875         /* Create per netdev entries */
3876         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3877
3878         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3879                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3880
3881                 return -ENOMEM;
3882         }
3883
3884         /* Entry to report DMA RX/TX rings */
3885         priv->dbgfs_rings_status =
3886                 debugfs_create_file("descriptors_status", S_IRUGO,
3887                                     priv->dbgfs_dir, dev,
3888                                     &stmmac_rings_status_fops);
3889
3890         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3891                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3892                 debugfs_remove_recursive(priv->dbgfs_dir);
3893
3894                 return -ENOMEM;
3895         }
3896
3897         /* Entry to report the DMA HW features */
3898         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3899                                             priv->dbgfs_dir,
3900                                             dev, &stmmac_dma_cap_fops);
3901
3902         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3903                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3904                 debugfs_remove_recursive(priv->dbgfs_dir);
3905
3906                 return -ENOMEM;
3907         }
3908
3909         return 0;
3910 }
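
/*
 * Editorial usage note: assuming debugfs is mounted at its usual location,
 * the two files created above can be inspected with, for example,
 *
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * where "eth0" stands for the actual netdev name used for dbgfs_dir.
 */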
3911
3912 static void stmmac_exit_fs(struct net_device *dev)
3913 {
3914         struct stmmac_priv *priv = netdev_priv(dev);
3915
3916         debugfs_remove_recursive(priv->dbgfs_dir);
3917 }
3918 #endif /* CONFIG_DEBUG_FS */
3919
3920 static const struct net_device_ops stmmac_netdev_ops = {
3921         .ndo_open = stmmac_open,
3922         .ndo_start_xmit = stmmac_xmit,
3923         .ndo_stop = stmmac_release,
3924         .ndo_change_mtu = stmmac_change_mtu,
3925         .ndo_fix_features = stmmac_fix_features,
3926         .ndo_set_features = stmmac_set_features,
3927         .ndo_set_rx_mode = stmmac_set_rx_mode,
3928         .ndo_tx_timeout = stmmac_tx_timeout,
3929         .ndo_do_ioctl = stmmac_ioctl,
3930 #ifdef CONFIG_NET_POLL_CONTROLLER
3931         .ndo_poll_controller = stmmac_poll_controller,
3932 #endif
3933         .ndo_set_mac_address = eth_mac_addr,
3934 };
3935
3936 /**
3937  *  stmmac_hw_init - Init the MAC device
3938  *  @priv: driver private structure
3939  *  Description: this function is to configure the MAC device according to
3940  *  some platform parameters or the HW capability register. It prepares the
3941  *  driver to use either ring or chain modes and to setup either enhanced or
3942  *  normal descriptors.
3943  */
3944 static int stmmac_hw_init(struct stmmac_priv *priv)
3945 {
3946         struct mac_device_info *mac;
3947
3948         /* Identify the MAC HW device */
3949         if (priv->plat->has_gmac) {
3950                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3951                 mac = dwmac1000_setup(priv->ioaddr,
3952                                       priv->plat->multicast_filter_bins,
3953                                       priv->plat->unicast_filter_entries,
3954                                       &priv->synopsys_id);
3955         } else if (priv->plat->has_gmac4) {
3956                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3957                 mac = dwmac4_setup(priv->ioaddr,
3958                                    priv->plat->multicast_filter_bins,
3959                                    priv->plat->unicast_filter_entries,
3960                                    &priv->synopsys_id);
3961         } else {
3962                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3963         }
3964         if (!mac)
3965                 return -ENOMEM;
3966
3967         priv->hw = mac;
3968
3969         /* To use the chained or ring mode */
3970         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3971                 priv->hw->mode = &dwmac4_ring_mode_ops;
3972         } else {
3973                 if (chain_mode) {
3974                         priv->hw->mode = &chain_mode_ops;
3975                         dev_info(priv->device, "Chain mode enabled\n");
3976                         priv->mode = STMMAC_CHAIN_MODE;
3977                 } else {
3978                         priv->hw->mode = &ring_mode_ops;
3979                         dev_info(priv->device, "Ring mode enabled\n");
3980                         priv->mode = STMMAC_RING_MODE;
3981                 }
3982         }
3983
3984         /* Get the HW capability (new GMAC newer than 3.50a) */
3985         priv->hw_cap_support = stmmac_get_hw_features(priv);
3986         if (priv->hw_cap_support) {
3987                 dev_info(priv->device, "DMA HW capability register supported\n");
3988
3989                 /* We can override some gmac/dma configuration fields that
3990                  * are passed through the platform (e.g. enh_desc, tx_coe)
3991                  * with the values from the HW capability register
3992                  * (if supported).
3993                  */
3994                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3995                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3996                 priv->hw->pmt = priv->plat->pmt;
3997
3998                 /* TXCOE doesn't work in thresh DMA mode */
3999                 if (priv->plat->force_thresh_dma_mode)
4000                         priv->plat->tx_coe = 0;
4001                 else
4002                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4003
4004                 /* In case of GMAC4 rx_coe is from HW cap register. */
4005                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4006
4007                 if (priv->dma_cap.rx_coe_type2)
4008                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4009                 else if (priv->dma_cap.rx_coe_type1)
4010                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4011
4012         } else {
4013                 dev_info(priv->device, "No HW DMA feature register supported\n");
4014         }
4015
4016         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4017         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4018                 priv->hw->desc = &dwmac4_desc_ops;
4019         else
4020                 stmmac_selec_desc_mode(priv);
4021
4022         if (priv->plat->rx_coe) {
4023                 priv->hw->rx_csum = priv->plat->rx_coe;
4024                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4025                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4026                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4027         }
4028         if (priv->plat->tx_coe)
4029                 dev_info(priv->device, "TX Checksum insertion supported\n");
4030
4031         if (priv->plat->pmt) {
4032                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4033                 device_set_wakeup_capable(priv->device, 1);
4034         }
4035
4036         if (priv->dma_cap.tsoen)
4037                 dev_info(priv->device, "TSO supported\n");
4038
4039         return 0;
4040 }
4041
4042 /**
4043  * stmmac_dvr_probe
4044  * @device: device pointer
4045  * @plat_dat: platform data pointer
4046  * @res: stmmac resource pointer
4047  * Description: this is the main probe function used to
4048  * call alloc_etherdev and allocate the private structure.
4049  * Return:
4050  * returns 0 on success, otherwise errno.
4051  */
4052 int stmmac_dvr_probe(struct device *device,
4053                      struct plat_stmmacenet_data *plat_dat,
4054                      struct stmmac_resources *res)
4055 {
4056         struct net_device *ndev = NULL;
4057         struct stmmac_priv *priv;
4058         int ret = 0;
4059         u32 queue;
4060
4061         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4062                                   MTL_MAX_TX_QUEUES,
4063                                   MTL_MAX_RX_QUEUES);
4064         if (!ndev)
4065                 return -ENOMEM;
4066
4067         SET_NETDEV_DEV(ndev, device);
4068
4069         priv = netdev_priv(ndev);
4070         priv->device = device;
4071         priv->dev = ndev;
4072
4073         stmmac_set_ethtool_ops(ndev);
4074         priv->pause = pause;
4075         priv->plat = plat_dat;
4076         priv->ioaddr = res->addr;
4077         priv->dev->base_addr = (unsigned long)res->addr;
4078
4079         priv->dev->irq = res->irq;
4080         priv->wol_irq = res->wol_irq;
4081         priv->lpi_irq = res->lpi_irq;
4082
4083         if (res->mac)
4084                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4085
4086         dev_set_drvdata(device, priv->dev);
4087
4088         /* Verify driver arguments */
4089         stmmac_verify_args();
4090
4091         /* Override with kernel parameters if supplied XXX CRS XXX
4092          * this needs to have multiple instances
4093          */
4094         if ((phyaddr >= 0) && (phyaddr <= 31))
4095                 priv->plat->phy_addr = phyaddr;
4096
4097         if (priv->plat->stmmac_rst)
4098                 reset_control_deassert(priv->plat->stmmac_rst);
4099
4100         /* Init MAC and get the capabilities */
4101         ret = stmmac_hw_init(priv);
4102         if (ret)
4103                 goto error_hw_init;
4104
4105         /* Configure real RX and TX queues */
4106         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4107         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4108
4109         ndev->netdev_ops = &stmmac_netdev_ops;
4110
4111         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4112                             NETIF_F_RXCSUM;
4113
4114         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4115                 ndev->hw_features |= NETIF_F_TSO;
4116                 priv->tso = true;
4117                 dev_info(priv->device, "TSO feature enabled\n");
4118         }
4119         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4120         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4121 #ifdef STMMAC_VLAN_TAG_USED
4122         /* Both mac100 and gmac support receive VLAN tag detection */
4123         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4124 #endif
4125         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4126
4127         /* MTU range: 46 - hw-specific max */
4128         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4129         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4130                 ndev->max_mtu = JUMBO_LEN;
4131         else
4132                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4133         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4134          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4135          */
4136         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4137             (priv->plat->maxmtu >= ndev->min_mtu))
4138                 ndev->max_mtu = priv->plat->maxmtu;
4139         else if (priv->plat->maxmtu < ndev->min_mtu)
4140                 dev_warn(priv->device,
4141                          "%s: warning: maxmtu has an invalid value (%d)\n",
4142                          __func__, priv->plat->maxmtu);
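             /* Example with hypothetical values, only to illustrate the
              * clamping above: with ndev->max_mtu = 1500 and
              * plat->maxmtu = 9000, the platform value is ignored because
              * it exceeds the core limit; plat->maxmtu = 30 would trigger
              * the warning since it is below ndev->min_mtu (46).
              */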
4143
4144         if (flow_ctrl)
4145                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4146
4147         /* Rx Watchdog is available in the COREs newer than the 3.40.
4148          * In some cases, for example on buggy HW, this feature
4149          * has to be disabled; this can be done by passing the
4150          * riwt_off field from the platform.
4151          */
4152         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4153                 priv->use_riwt = 1;
4154                 dev_info(priv->device,
4155                          "Enable RX Mitigation via HW Watchdog Timer\n");
4156         }
4157
4158         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4159                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4160
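                     /* NAPI weight (per-poll budget) scales with the number
                      * of RX queues in use: 8 per queue.
                      */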
4161                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4162                                (8 * priv->plat->rx_queues_to_use));
4163         }
4164
4165         spin_lock_init(&priv->lock);
4166
4167         /* If a specific clk_csr value is passed from the platform,
4168          * this means that the CSR Clock Range selection cannot be
4169          * changed at run-time and is fixed. Otherwise, the driver will
4170          * try to set the MDC clock dynamically according to the actual
4171          * CSR clock input.
4172          */
4173         if (!priv->plat->clk_csr)
4174                 stmmac_clk_csr_set(priv);
4175         else
4176                 priv->clk_csr = priv->plat->clk_csr;
4177
4178         stmmac_check_pcs_mode(priv);
4179
4180         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4181             priv->hw->pcs != STMMAC_PCS_TBI &&
4182             priv->hw->pcs != STMMAC_PCS_RTBI) {
4183                 /* MDIO bus Registration */
4184                 ret = stmmac_mdio_register(ndev);
4185                 if (ret < 0) {
4186                         dev_err(priv->device,
4187                                 "%s: MDIO bus (id: %d) registration failed",
4188                                 __func__, priv->plat->bus_id);
4189                         goto error_mdio_register;
4190                 }
4191         }
4192
4193         ret = register_netdev(ndev);
4194         if (ret) {
4195                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4196                         __func__, ret);
4197                 goto error_netdev_register;
4198         }
4199
4200         return ret;
4201
4202 error_netdev_register:
4203         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4204             priv->hw->pcs != STMMAC_PCS_TBI &&
4205             priv->hw->pcs != STMMAC_PCS_RTBI)
4206                 stmmac_mdio_unregister(ndev);
4207 error_mdio_register:
4208         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4209                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4210
4211                 netif_napi_del(&rx_q->napi);
4212         }
4213 error_hw_init:
4214         free_netdev(ndev);
4215
4216         return ret;
4217 }
4218 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4219
4220 /**
4221  * stmmac_dvr_remove
4222  * @dev: device pointer
4223  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4224  * changes the link status and releases the DMA descriptor rings.
4225  */
4226 int stmmac_dvr_remove(struct device *dev)
4227 {
4228         struct net_device *ndev = dev_get_drvdata(dev);
4229         struct stmmac_priv *priv = netdev_priv(ndev);
4230
4231         netdev_info(priv->dev, "%s: removing driver", __func__);
4232
4233         stmmac_stop_all_dma(priv);
4234
4235         priv->hw->mac->set_mac(priv->ioaddr, false);
4236         netif_carrier_off(ndev);
4237         unregister_netdev(ndev);
4238         if (priv->plat->stmmac_rst)
4239                 reset_control_assert(priv->plat->stmmac_rst);
4240         clk_disable_unprepare(priv->plat->pclk);
4241         clk_disable_unprepare(priv->plat->stmmac_clk);
4242         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4243             priv->hw->pcs != STMMAC_PCS_TBI &&
4244             priv->hw->pcs != STMMAC_PCS_RTBI)
4245                 stmmac_mdio_unregister(ndev);
4246         free_netdev(ndev);
4247
4248         return 0;
4249 }
4250 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4251
4252 /**
4253  * stmmac_suspend - suspend callback
4254  * @dev: device pointer
4255  * Description: this is the function to suspend the device and it is called
4256  * by the platform driver to stop the network queue, release the resources,
4257  * program the PMT register (for WoL), clean and release driver resources.
4258  */
4259 int stmmac_suspend(struct device *dev)
4260 {
4261         struct net_device *ndev = dev_get_drvdata(dev);
4262         struct stmmac_priv *priv = netdev_priv(ndev);
4263         unsigned long flags;
4264
4265         if (!ndev || !netif_running(ndev))
4266                 return 0;
4267
4268         if (ndev->phydev)
4269                 phy_stop(ndev->phydev);
4270
4271         spin_lock_irqsave(&priv->lock, flags);
4272
4273         netif_device_detach(ndev);
4274         stmmac_stop_all_queues(priv);
4275
4276         stmmac_disable_all_queues(priv);
4277
4278         /* Stop TX/RX DMA */
4279         stmmac_stop_all_dma(priv);
4280
4281         /* Enable Power down mode by programming the PMT regs */
4282         if (device_may_wakeup(priv->device)) {
4283                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4284                 priv->irq_wake = 1;
4285         } else {
4286                 priv->hw->mac->set_mac(priv->ioaddr, false);
4287                 pinctrl_pm_select_sleep_state(priv->device);
4288                 /* Disable the clocks since PMT wake-up is not used */
4289                 clk_disable(priv->plat->pclk);
4290                 clk_disable(priv->plat->stmmac_clk);
4291         }
4292         spin_unlock_irqrestore(&priv->lock, flags);
4293
4294         priv->oldlink = 0;
4295         priv->speed = SPEED_UNKNOWN;
4296         priv->oldduplex = DUPLEX_UNKNOWN;
4297         return 0;
4298 }
4299 EXPORT_SYMBOL_GPL(stmmac_suspend);
4300
4301 /**
4302  * stmmac_reset_queues_param - reset queue parameters
4303  * @priv: driver private structure pointer
4304  */
4305 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4306 {
4307         u32 rx_cnt = priv->plat->rx_queues_to_use;
4308         u32 tx_cnt = priv->plat->tx_queues_to_use;
4309         u32 queue;
4310
4311         for (queue = 0; queue < rx_cnt; queue++) {
4312                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4313
4314                 rx_q->cur_rx = 0;
4315                 rx_q->dirty_rx = 0;
4316         }
4317
4318         for (queue = 0; queue < tx_cnt; queue++) {
4319                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4320
4321                 tx_q->cur_tx = 0;
4322                 tx_q->dirty_tx = 0;
4323         }
4324 }
4325
4326 /**
4327  * stmmac_resume - resume callback
4328  * @dev: device pointer
4329  * Description: on resume, this function is invoked to set up the DMA and CORE
4330  * in a usable state.
4331  */
4332 int stmmac_resume(struct device *dev)
4333 {
4334         struct net_device *ndev = dev_get_drvdata(dev);
4335         struct stmmac_priv *priv = netdev_priv(ndev);
4336         unsigned long flags;
4337
4338         if (!netif_running(ndev))
4339                 return 0;
4340
4341         /* The Power Down bit in the PMT register is cleared
4342          * automatically as soon as a magic packet or a Wake-up frame
4343          * is received. Even so, it's better to manually clear
4344          * this bit because it can generate problems while resuming
4345          * from other devices (e.g. serial console).
4346          */
4347         if (device_may_wakeup(priv->device)) {
4348                 spin_lock_irqsave(&priv->lock, flags);
4349                 priv->hw->mac->pmt(priv->hw, 0);
4350                 spin_unlock_irqrestore(&priv->lock, flags);
4351                 priv->irq_wake = 0;
4352         } else {
4353                 pinctrl_pm_select_default_state(priv->device);
4354                 /* enable the clk previously disabled */
4355                 clk_enable(priv->plat->stmmac_clk);
4356                 clk_enable(priv->plat->pclk);
4357                 /* reset the phy so that it's ready */
4358                 if (priv->mii)
4359                         stmmac_mdio_reset(priv->mii);
4360         }
4361
4362         netif_device_attach(ndev);
4363
4364         spin_lock_irqsave(&priv->lock, flags);
4365
4366         stmmac_reset_queues_param(priv);
4367
4368         /* reset private mss value to force mss context settings at
4369          * next tso xmit (only used for gmac4).
4370          */
4371         priv->mss = 0;
4372
4373         stmmac_clear_descriptors(priv);
4374
4375         stmmac_hw_setup(ndev, false);
4376         stmmac_init_tx_coalesce(priv);
4377         stmmac_set_rx_mode(ndev);
4378
4379         stmmac_enable_all_queues(priv);
4380
4381         stmmac_start_all_queues(priv);
4382
4383         spin_unlock_irqrestore(&priv->lock, flags);
4384
4385         if (ndev->phydev)
4386                 phy_start(ndev->phydev);
4387
4388         return 0;
4389 }
4390 EXPORT_SYMBOL_GPL(stmmac_resume);
4391
4392 #ifndef MODULE
4393 static int __init stmmac_cmdline_opt(char *str)
4394 {
4395         char *opt;
4396
4397         if (!str || !*str)
4398                 return -EINVAL;
4399         while ((opt = strsep(&str, ",")) != NULL) {
4400                 if (!strncmp(opt, "debug:", 6)) {
4401                         if (kstrtoint(opt + 6, 0, &debug))
4402                                 goto err;
4403                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4404                         if (kstrtoint(opt + 8, 0, &phyaddr))
4405                                 goto err;
4406                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4407                         if (kstrtoint(opt + 7, 0, &buf_sz))
4408                                 goto err;
4409                 } else if (!strncmp(opt, "tc:", 3)) {
4410                         if (kstrtoint(opt + 3, 0, &tc))
4411                                 goto err;
4412                 } else if (!strncmp(opt, "watchdog:", 9)) {
4413                         if (kstrtoint(opt + 9, 0, &watchdog))
4414                                 goto err;
4415                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4416                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4417                                 goto err;
4418                 } else if (!strncmp(opt, "pause:", 6)) {
4419                         if (kstrtoint(opt + 6, 0, &pause))
4420                                 goto err;
4421                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4422                         if (kstrtoint(opt + 10, 0, &eee_timer))
4423                                 goto err;
4424                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4425                         if (kstrtoint(opt + 11, 0, &chain_mode))
4426                                 goto err;
4427                 }
4428         }
4429         return 0;
4430
4431 err:
4432         pr_err("%s: ERROR broken module parameter conversion", __func__);
4433         return -EINVAL;
4434 }
4435
4436 __setup("stmmaceth=", stmmac_cmdline_opt);
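 /* Built-in usage example (hypothetical values): append
  *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,flow_ctrl:1
  * to the kernel command line; each option maps to a module
  * parameter parsed by stmmac_cmdline_opt() above.
  */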
4437 #endif /* MODULE */
4438
4439 static int __init stmmac_init(void)
4440 {
4441 #ifdef CONFIG_DEBUG_FS
4442         /* Create debugfs main directory if it doesn't exist yet */
4443         if (!stmmac_fs_dir) {
4444                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4445
4446                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4447                         pr_err("ERROR %s, debugfs create directory failed\n",
4448                                STMMAC_RESOURCE_NAME);
4449
4450                         return -ENOMEM;
4451                 }
4452         }
4453 #endif
4454
4455         return 0;
4456 }
4457
4458 static void __exit stmmac_exit(void)
4459 {
4460 #ifdef CONFIG_DEBUG_FS
4461         debugfs_remove_recursive(stmmac_fs_dir);
4462 #endif
4463 }
4464
4465 module_init(stmmac_init)
4466 module_exit(stmmac_exit)
4467
4468 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4469 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4470 MODULE_LICENSE("GPL");