1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allows the user to force the use of the chain mode instead of the ring.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
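
/* Illustrative note, not from the original sources: when the driver is built
 * as a module, the writable parameters above can be tuned at load time, for
 * example (hypothetical module name and values):
 *
 *   modprobe stmmac buf_sz=2048 eee_timer=2000 chain_mode=1
 *
 * Out-of-range values are corrected by stmmac_verify_args() further below.
 */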
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148         u32 queue;
149
150         for (queue = 0; queue < rx_queues_cnt; queue++) {
151                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153                 napi_disable(&rx_q->napi);
154         }
155 }
156
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164         u32 queue;
165
166         for (queue = 0; queue < rx_queues_cnt; queue++) {
167                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169                 napi_enable(&rx_q->napi);
170         }
171 }
172
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180         u32 queue;
181
182         for (queue = 0; queue < tx_queues_cnt; queue++)
183                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *      If a specific clk_csr value is passed from the platform
206  *      this means that the CSR Clock Range selection cannot be
207  *      changed at run-time and it is fixed (as reported in the driver
208  *      documentation). Otherwise the driver will try to set the MDC
209  *      clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213         u32 clk_rate;
214
215         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216
217         /* Platform provided default clk_csr would be assumed valid
218          * for all other cases except for the below mentioned ones.
219          * For values higher than the IEEE 802.3 specified frequency
220          * we cannot estimate the proper divider because the frequency of
221          * clk_csr_i is not known. So we do not change the default
222          * divider.
223          */
224         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225                 if (clk_rate < CSR_F_35M)
226                         priv->clk_csr = STMMAC_CSR_20_35M;
227                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228                         priv->clk_csr = STMMAC_CSR_35_60M;
229                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230                         priv->clk_csr = STMMAC_CSR_60_100M;
231                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232                         priv->clk_csr = STMMAC_CSR_100_150M;
233                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234                         priv->clk_csr = STMMAC_CSR_150_250M;
235                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236                         priv->clk_csr = STMMAC_CSR_250_300M;
237         }
238 }
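
/* For example (illustrative figure only): a 75 MHz csr clock falls in the
 * CSR_F_60M..CSR_F_100M range handled above, so clk_csr is set to
 * STMMAC_CSR_60_100M and the MDC clock divider is chosen accordingly.
 */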
239
240 static void print_pkt(unsigned char *buf, int len)
241 {
242         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
243         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
244 }
245
246 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
247 {
248         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
249         u32 avail;
250
251         if (tx_q->dirty_tx > tx_q->cur_tx)
252                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
253         else
254                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
255
256         return avail;
257 }
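
/* Worked example of the ring arithmetic above (hypothetical values): with
 * DMA_TX_SIZE = 256 descriptors, cur_tx = 250 and dirty_tx = 10, the else
 * branch yields avail = 256 - 250 + 10 - 1 = 15 free descriptors. One slot
 * is always kept unused so a full ring is never confused with an empty one.
 */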
258
259 /**
260  * stmmac_rx_dirty - Get the number of dirty RX descriptors
261  * @priv: driver private structure
262  * @queue: RX queue index
263  */
264 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
265 {
266         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
267         u32 dirty;
268
269         if (rx_q->dirty_rx <= rx_q->cur_rx)
270                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
271         else
272                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
273
274         return dirty;
275 }
276
277 /**
278  * stmmac_hw_fix_mac_speed - callback for speed selection
279  * @priv: driver private structure
280  * Description: on some platforms (e.g. ST), some HW system configuration
281  * registers have to be set according to the link speed negotiated.
282  */
283 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
284 {
285         struct net_device *ndev = priv->dev;
286         struct phy_device *phydev = ndev->phydev;
287
288         if (likely(priv->plat->fix_mac_speed))
289                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
290 }
291
292 /**
293  * stmmac_enable_eee_mode - check and enter LPI mode
294  * @priv: driver private structure
295  * Description: this function checks that all TX queues are idle and, if so,
296  * enters LPI mode when EEE is enabled.
297  */
298 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
299 {
300         u32 tx_cnt = priv->plat->tx_queues_to_use;
301         u32 queue;
302
303         /* check if all TX queues have the work finished */
304         for (queue = 0; queue < tx_cnt; queue++) {
305                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
306
307                 if (tx_q->dirty_tx != tx_q->cur_tx)
308                         return; /* still unfinished work */
309         }
310
311         /* Check and enter in LPI mode */
312         if (!priv->tx_path_in_lpi_mode)
313                 priv->hw->mac->set_eee_mode(priv->hw,
314                                             priv->plat->en_tx_lpi_clockgating);
315 }
316
317 /**
318  * stmmac_disable_eee_mode - disable and exit from LPI mode
319  * @priv: driver private structure
320  * Description: this function exits and disables EEE if the LPI state is
321  * true. It is called by the xmit path.
322  */
323 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
324 {
325         priv->hw->mac->reset_eee_mode(priv->hw);
326         del_timer_sync(&priv->eee_ctrl_timer);
327         priv->tx_path_in_lpi_mode = false;
328 }
329
330 /**
331  * stmmac_eee_ctrl_timer - EEE TX SW timer.
332  * @arg : data hook
333  * Description:
334  *  if there is no data transfer and if we are not in LPI state,
335  *  then the MAC transmitter can be moved to the LPI state.
336  */
337 static void stmmac_eee_ctrl_timer(unsigned long arg)
338 {
339         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
340
341         stmmac_enable_eee_mode(priv);
342         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
343 }
344
345 /**
346  * stmmac_eee_init - init EEE
347  * @priv: driver private structure
348  * Description:
349  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
350  *  can also manage EEE, this function enables the LPI state and starts the
351  *  related timer.
352  */
353 bool stmmac_eee_init(struct stmmac_priv *priv)
354 {
355         struct net_device *ndev = priv->dev;
356         unsigned long flags;
357         bool ret = false;
358
359         /* Using the PCS we cannot deal with the phy registers at this stage,
360          * so we do not support extra features like EEE.
361          */
362         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
363             (priv->hw->pcs == STMMAC_PCS_TBI) ||
364             (priv->hw->pcs == STMMAC_PCS_RTBI))
365                 goto out;
366
367         /* MAC core supports the EEE feature. */
368         if (priv->dma_cap.eee) {
369                 int tx_lpi_timer = priv->tx_lpi_timer;
370
371                 /* Check if the PHY supports EEE */
372                 if (phy_init_eee(ndev->phydev, 1)) {
373                         /* To manage at run-time if the EEE cannot be supported
374                          * anymore (for example because the lp caps have been
375                          * changed).
376                          * In that case the driver disables its own timers.
377                          */
378                         spin_lock_irqsave(&priv->lock, flags);
379                         if (priv->eee_active) {
380                                 netdev_dbg(priv->dev, "disable EEE\n");
381                                 del_timer_sync(&priv->eee_ctrl_timer);
382                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
383                                                              tx_lpi_timer);
384                         }
385                         priv->eee_active = 0;
386                         spin_unlock_irqrestore(&priv->lock, flags);
387                         goto out;
388                 }
389                 /* Activate the EEE and start timers */
390                 spin_lock_irqsave(&priv->lock, flags);
391                 if (!priv->eee_active) {
392                         priv->eee_active = 1;
393                         setup_timer(&priv->eee_ctrl_timer,
394                                     stmmac_eee_ctrl_timer,
395                                     (unsigned long)priv);
396                         mod_timer(&priv->eee_ctrl_timer,
397                                   STMMAC_LPI_T(eee_timer));
398
399                         priv->hw->mac->set_eee_timer(priv->hw,
400                                                      STMMAC_DEFAULT_LIT_LS,
401                                                      tx_lpi_timer);
402                 }
403                 /* Set HW EEE according to the speed */
404                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
405
406                 ret = true;
407                 spin_unlock_irqrestore(&priv->lock, flags);
408
409                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
410         }
411 out:
412         return ret;
413 }
414
415 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
416  * @priv: driver private structure
417  * @p : descriptor pointer
418  * @skb : the socket buffer
419  * Description :
420  * This function will read the timestamp from the descriptor, pass it to the
421  * stack, and also perform some sanity checks.
422  */
423 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
424                                    struct dma_desc *p, struct sk_buff *skb)
425 {
426         struct skb_shared_hwtstamps shhwtstamp;
427         u64 ns;
428
429         if (!priv->hwts_tx_en)
430                 return;
431
432         /* exit if skb doesn't support hw tstamp */
433         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
434                 return;
435
436         /* check tx tstamp status */
437         if (!priv->hw->desc->get_tx_timestamp_status(p)) {
438                 /* get the valid tstamp */
439                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
440
441                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
442                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
443
444                 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
445                 /* pass tstamp to stack */
446                 skb_tstamp_tx(skb, &shhwtstamp);
447         }
448
449         return;
450 }
451
452 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
453  * @priv: driver private structure
454  * @p : descriptor pointer
455  * @np : next descriptor pointer
456  * @skb : the socket buffer
457  * Description :
458  * This function will read received packet's timestamp from the descriptor
459  * and pass it to the stack. It also performs some sanity checks.
460  */
461 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
462                                    struct dma_desc *np, struct sk_buff *skb)
463 {
464         struct skb_shared_hwtstamps *shhwtstamp = NULL;
465         u64 ns;
466
467         if (!priv->hwts_rx_en)
468                 return;
469
470         /* Check if timestamp is available */
471         if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
472                 /* For GMAC4, the valid timestamp is from CTX next desc. */
473                 if (priv->plat->has_gmac4)
474                         ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
475                 else
476                         ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
477
478                 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
479                 shhwtstamp = skb_hwtstamps(skb);
480                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
481                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
482         } else  {
483                 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
484         }
485 }
486
487 /**
488  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
489  *  @dev: device pointer.
490  *  @ifr: An IOCTL specific structure, that can contain a pointer to
491  *  a proprietary structure used to pass information to the driver.
492  *  Description:
493  *  This function configures the MAC to enable/disable both outgoing(TX)
494  *  and incoming(RX) packets time stamping based on user input.
495  *  Return Value:
496  *  0 on success and an appropriate -ve integer on failure.
497  */
498 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
499 {
500         struct stmmac_priv *priv = netdev_priv(dev);
501         struct hwtstamp_config config;
502         struct timespec64 now;
503         u64 temp = 0;
504         u32 ptp_v2 = 0;
505         u32 tstamp_all = 0;
506         u32 ptp_over_ipv4_udp = 0;
507         u32 ptp_over_ipv6_udp = 0;
508         u32 ptp_over_ethernet = 0;
509         u32 snap_type_sel = 0;
510         u32 ts_master_en = 0;
511         u32 ts_event_en = 0;
512         u32 value = 0;
513         u32 sec_inc;
514
515         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
516                 netdev_alert(priv->dev, "No support for HW time stamping\n");
517                 priv->hwts_tx_en = 0;
518                 priv->hwts_rx_en = 0;
519
520                 return -EOPNOTSUPP;
521         }
522
523         if (copy_from_user(&config, ifr->ifr_data,
524                            sizeof(struct hwtstamp_config)))
525                 return -EFAULT;
526
527         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
528                    __func__, config.flags, config.tx_type, config.rx_filter);
529
530         /* reserved for future extensions */
531         if (config.flags)
532                 return -EINVAL;
533
534         if (config.tx_type != HWTSTAMP_TX_OFF &&
535             config.tx_type != HWTSTAMP_TX_ON)
536                 return -ERANGE;
537
538         if (priv->adv_ts) {
539                 switch (config.rx_filter) {
540                 case HWTSTAMP_FILTER_NONE:
541                         /* do not time stamp any incoming packet */
542                         config.rx_filter = HWTSTAMP_FILTER_NONE;
543                         break;
544
545                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
546                         /* PTP v1, UDP, any kind of event packet */
547                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
548                         /* take time stamp for all event messages */
549                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
550
551                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
552                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
553                         break;
554
555                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
556                         /* PTP v1, UDP, Sync packet */
557                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
558                         /* take time stamp for SYNC messages only */
559                         ts_event_en = PTP_TCR_TSEVNTENA;
560
561                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563                         break;
564
565                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
566                         /* PTP v1, UDP, Delay_req packet */
567                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
568                         /* take time stamp for Delay_Req messages only */
569                         ts_master_en = PTP_TCR_TSMSTRENA;
570                         ts_event_en = PTP_TCR_TSEVNTENA;
571
572                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574                         break;
575
576                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
577                         /* PTP v2, UDP, any kind of event packet */
578                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
579                         ptp_v2 = PTP_TCR_TSVER2ENA;
580                         /* take time stamp for all event messages */
581                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582
583                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585                         break;
586
587                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
588                         /* PTP v2, UDP, Sync packet */
589                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
590                         ptp_v2 = PTP_TCR_TSVER2ENA;
591                         /* take time stamp for SYNC messages only */
592                         ts_event_en = PTP_TCR_TSEVNTENA;
593
594                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596                         break;
597
598                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
599                         /* PTP v2, UDP, Delay_req packet */
600                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
601                         ptp_v2 = PTP_TCR_TSVER2ENA;
602                         /* take time stamp for Delay_Req messages only */
603                         ts_master_en = PTP_TCR_TSMSTRENA;
604                         ts_event_en = PTP_TCR_TSEVNTENA;
605
606                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
607                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
608                         break;
609
610                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
611                         /* PTP v2/802.1AS, any layer, any kind of event packet */
612                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
613                         ptp_v2 = PTP_TCR_TSVER2ENA;
614                         /* take time stamp for all event messages */
615                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
616
617                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
619                         ptp_over_ethernet = PTP_TCR_TSIPENA;
620                         break;
621
622                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
623                         /* PTP v2/802.1AS, any layer, Sync packet */
624                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
625                         ptp_v2 = PTP_TCR_TSVER2ENA;
626                         /* take time stamp for SYNC messages only */
627                         ts_event_en = PTP_TCR_TSEVNTENA;
628
629                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631                         ptp_over_ethernet = PTP_TCR_TSIPENA;
632                         break;
633
634                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
635                         /* PTP v2/802.1AS, any layer, Delay_req packet */
636                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
637                         ptp_v2 = PTP_TCR_TSVER2ENA;
638                         /* take time stamp for Delay_Req messages only */
639                         ts_master_en = PTP_TCR_TSMSTRENA;
640                         ts_event_en = PTP_TCR_TSEVNTENA;
641
642                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644                         ptp_over_ethernet = PTP_TCR_TSIPENA;
645                         break;
646
647                 case HWTSTAMP_FILTER_ALL:
648                         /* time stamp any incoming packet */
649                         config.rx_filter = HWTSTAMP_FILTER_ALL;
650                         tstamp_all = PTP_TCR_TSENALL;
651                         break;
652
653                 default:
654                         return -ERANGE;
655                 }
656         } else {
657                 switch (config.rx_filter) {
658                 case HWTSTAMP_FILTER_NONE:
659                         config.rx_filter = HWTSTAMP_FILTER_NONE;
660                         break;
661                 default:
662                         /* PTP v1, UDP, any kind of event packet */
663                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
664                         break;
665                 }
666         }
667         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
668         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
669
670         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
671                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
672         else {
673                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
674                          tstamp_all | ptp_v2 | ptp_over_ethernet |
675                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
676                          ts_master_en | snap_type_sel);
677                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
678
679                 /* program Sub Second Increment reg */
680                 sec_inc = priv->hw->ptp->config_sub_second_increment(
681                         priv->ptpaddr, priv->plat->clk_ptp_rate,
682                         priv->plat->has_gmac4);
683                 temp = div_u64(1000000000ULL, sec_inc);
684
685                 /* calculate the default addend value:
686                  * formula is :
687                  * addend = (2^32)/freq_div_ratio;
688                  * where, freq_div_ratio = 1e9ns/sec_inc
689                  */
690                 temp = (u64)(temp << 32);
691                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
692                 priv->hw->ptp->config_addend(priv->ptpaddr,
693                                              priv->default_addend);
694
695                 /* initialize system time */
696                 ktime_get_real_ts64(&now);
697
698                 /* lower 32 bits of tv_sec are safe until y2106 */
699                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
700                                             now.tv_nsec);
701         }
702
703         return copy_to_user(ifr->ifr_data, &config,
704                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
705 }
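
/* Illustrative userspace sketch, not part of this driver, showing how the
 * ioctl above is typically reached through SIOCSHWTSTAMP; the interface name
 * and filter choices are hypothetical:
 *
 *   struct hwtstamp_config cfg = { 0 };
 *   struct ifreq ifr = { 0 };
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   cfg.tx_type = HWTSTAMP_TX_ON;
 *   cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *           perror("SIOCSHWTSTAMP");
 *
 * On success the (possibly adjusted) config is copied back to userspace, as
 * done by the copy_to_user() at the end of stmmac_hwtstamp_ioctl().
 */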
706
707 /**
708  * stmmac_init_ptp - init PTP
709  * @priv: driver private structure
710  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
711  * This is done by looking at the HW cap. register.
712  * This function also registers the ptp driver.
713  */
714 static int stmmac_init_ptp(struct stmmac_priv *priv)
715 {
716         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
717                 return -EOPNOTSUPP;
718
719         priv->adv_ts = 0;
720         /* Check if adv_ts can be enabled for dwmac 4.x core */
721         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
722                 priv->adv_ts = 1;
723         /* Dwmac 3.x core with extend_desc can support adv_ts */
724         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
725                 priv->adv_ts = 1;
726
727         if (priv->dma_cap.time_stamp)
728                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
729
730         if (priv->adv_ts)
731                 netdev_info(priv->dev,
732                             "IEEE 1588-2008 Advanced Timestamp supported\n");
733
734         priv->hw->ptp = &stmmac_ptp;
735         priv->hwts_tx_en = 0;
736         priv->hwts_rx_en = 0;
737
738         stmmac_ptp_register(priv);
739
740         return 0;
741 }
742
743 static void stmmac_release_ptp(struct stmmac_priv *priv)
744 {
745         if (priv->plat->clk_ptp_ref)
746                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
747         stmmac_ptp_unregister(priv);
748 }
749
750 /**
751  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
752  *  @priv: driver private structure
753  *  Description: It is used for configuring the flow control in all queues
754  */
755 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
756 {
757         u32 tx_cnt = priv->plat->tx_queues_to_use;
758
759         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
760                                  priv->pause, tx_cnt);
761 }
762
763 /**
764  * stmmac_adjust_link - adjusts the link parameters
765  * @dev: net device structure
766  * Description: this is the helper called by the physical abstraction layer
767  * drivers to communicate the phy link status. According to the speed and
768  * duplex, this driver can invoke the registered glue-logic as well.
769  * It also invokes the eee initialization because it could happen when
770  * switching between different (eee capable) networks.
771  */
772 static void stmmac_adjust_link(struct net_device *dev)
773 {
774         struct stmmac_priv *priv = netdev_priv(dev);
775         struct phy_device *phydev = dev->phydev;
776         unsigned long flags;
777         int new_state = 0;
778
779         if (!phydev)
780                 return;
781
782         spin_lock_irqsave(&priv->lock, flags);
783
784         if (phydev->link) {
785                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
786
787                 /* Now we make sure that we can be in full duplex mode.
788                  * If not, we operate in half-duplex mode. */
789                 if (phydev->duplex != priv->oldduplex) {
790                         new_state = 1;
791                         if (!(phydev->duplex))
792                                 ctrl &= ~priv->hw->link.duplex;
793                         else
794                                 ctrl |= priv->hw->link.duplex;
795                         priv->oldduplex = phydev->duplex;
796                 }
797                 /* Flow Control operation */
798                 if (phydev->pause)
799                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
800
801                 if (phydev->speed != priv->speed) {
802                         new_state = 1;
803                         switch (phydev->speed) {
804                         case 1000:
805                                 if (priv->plat->has_gmac ||
806                                     priv->plat->has_gmac4)
807                                         ctrl &= ~priv->hw->link.port;
808                                 break;
809                         case 100:
810                                 if (priv->plat->has_gmac ||
811                                     priv->plat->has_gmac4) {
812                                         ctrl |= priv->hw->link.port;
813                                         ctrl |= priv->hw->link.speed;
814                                 } else {
815                                         ctrl &= ~priv->hw->link.port;
816                                 }
817                                 break;
818                         case 10:
819                                 if (priv->plat->has_gmac ||
820                                     priv->plat->has_gmac4) {
821                                         ctrl |= priv->hw->link.port;
822                                         ctrl &= ~(priv->hw->link.speed);
823                                 } else {
824                                         ctrl &= ~priv->hw->link.port;
825                                 }
826                                 break;
827                         default:
828                                 netif_warn(priv, link, priv->dev,
829                                            "broken speed: %d\n", phydev->speed);
830                                 phydev->speed = SPEED_UNKNOWN;
831                                 break;
832                         }
833                         if (phydev->speed != SPEED_UNKNOWN)
834                                 stmmac_hw_fix_mac_speed(priv);
835                         priv->speed = phydev->speed;
836                 }
837
838                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
839
840                 if (!priv->oldlink) {
841                         new_state = 1;
842                         priv->oldlink = 1;
843                 }
844         } else if (priv->oldlink) {
845                 new_state = 1;
846                 priv->oldlink = 0;
847                 priv->speed = SPEED_UNKNOWN;
848                 priv->oldduplex = DUPLEX_UNKNOWN;
849         }
850
851         if (new_state && netif_msg_link(priv))
852                 phy_print_status(phydev);
853
854         spin_unlock_irqrestore(&priv->lock, flags);
855
856         if (phydev->is_pseudo_fixed_link)
857                 /* Stop the PHY layer from calling the link adjustment hook
858                  * in case a switch is attached to the stmmac driver.
859                  */
860                 phydev->irq = PHY_IGNORE_INTERRUPT;
861         else
862                 /* At this stage, init the EEE if supported.
863                  * Never called in case of fixed_link.
864                  */
865                 priv->eee_enabled = stmmac_eee_init(priv);
866 }
867
868 /**
869  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
870  * @priv: driver private structure
871  * Description: this is to verify if the HW supports the Physical Coding
872  * Sublayer (PCS) interface, which can be used when the MAC is
873  * configured for the TBI, RTBI, or SGMII PHY interface.
874  */
875 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
876 {
877         int interface = priv->plat->interface;
878
879         if (priv->dma_cap.pcs) {
880                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
881                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
882                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
883                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
884                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
885                         priv->hw->pcs = STMMAC_PCS_RGMII;
886                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
887                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
888                         priv->hw->pcs = STMMAC_PCS_SGMII;
889                 }
890         }
891 }
892
893 /**
894  * stmmac_init_phy - PHY initialization
895  * @dev: net device structure
896  * Description: it initializes the driver's PHY state, and attaches the PHY
897  * to the mac driver.
898  *  Return value:
899  *  0 on success
900  */
901 static int stmmac_init_phy(struct net_device *dev)
902 {
903         struct stmmac_priv *priv = netdev_priv(dev);
904         struct phy_device *phydev;
905         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
906         char bus_id[MII_BUS_ID_SIZE];
907         int interface = priv->plat->interface;
908         int max_speed = priv->plat->max_speed;
909         priv->oldlink = 0;
910         priv->speed = SPEED_UNKNOWN;
911         priv->oldduplex = DUPLEX_UNKNOWN;
912
913         if (priv->plat->phy_node) {
914                 phydev = of_phy_connect(dev, priv->plat->phy_node,
915                                         &stmmac_adjust_link, 0, interface);
916         } else {
917                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
918                          priv->plat->bus_id);
919
920                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
921                          priv->plat->phy_addr);
922                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
923                            phy_id_fmt);
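                /* For illustration (hypothetical values): with bus_id 1 and
                 * phy_addr 3, the strings built above are "stmmac-1" and
                 * "stmmac-1:03", since PHY_ID_FMT is "%s:%02x".
                 */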
924
925                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
926                                      interface);
927         }
928
929         if (IS_ERR_OR_NULL(phydev)) {
930                 netdev_err(priv->dev, "Could not attach to PHY\n");
931                 if (!phydev)
932                         return -ENODEV;
933
934                 return PTR_ERR(phydev);
935         }
936
937         /* Stop Advertising 1000BASE Capability if interface is not GMII */
938         if ((interface == PHY_INTERFACE_MODE_MII) ||
939             (interface == PHY_INTERFACE_MODE_RMII) ||
940                 (max_speed < 1000 && max_speed > 0))
941                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
942                                          SUPPORTED_1000baseT_Full);
943
944         /*
945          * Broken HW is sometimes missing the pull-up resistor on the
946          * MDIO line, which results in reads to non-existent devices returning
947          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
948          * device as well.
949          * Note: phydev->phy_id is the result of reading the UID PHY registers.
950          */
951         if (!priv->plat->phy_node && phydev->phy_id == 0) {
952                 phy_disconnect(phydev);
953                 return -ENODEV;
954         }
955
956         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
957          * subsequent PHY polling, make sure we force a link transition if
958          * we have a UP/DOWN/UP transition
959          */
960         if (phydev->is_pseudo_fixed_link)
961                 phydev->irq = PHY_POLL;
962
963         phy_attached_info(phydev);
964         return 0;
965 }
966
967 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
968 {
969         u32 rx_cnt = priv->plat->rx_queues_to_use;
970         void *head_rx;
971         u32 queue;
972
973         /* Display RX rings */
974         for (queue = 0; queue < rx_cnt; queue++) {
975                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
976
977                 pr_info("\tRX Queue %u rings\n", queue);
978
979                 if (priv->extend_desc)
980                         head_rx = (void *)rx_q->dma_erx;
981                 else
982                         head_rx = (void *)rx_q->dma_rx;
983
984                 /* Display RX ring */
985                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
986         }
987 }
988
989 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
990 {
991         u32 tx_cnt = priv->plat->tx_queues_to_use;
992         void *head_tx;
993         u32 queue;
994
995         /* Display TX rings */
996         for (queue = 0; queue < tx_cnt; queue++) {
997                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
998
999                 pr_info("\tTX Queue %d rings\n", queue);
1000
1001                 if (priv->extend_desc)
1002                         head_tx = (void *)tx_q->dma_etx;
1003                 else
1004                         head_tx = (void *)tx_q->dma_tx;
1005
1006                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1007         }
1008 }
1009
1010 static void stmmac_display_rings(struct stmmac_priv *priv)
1011 {
1012         /* Display RX ring */
1013         stmmac_display_rx_rings(priv);
1014
1015         /* Display TX ring */
1016         stmmac_display_tx_rings(priv);
1017 }
1018
1019 static int stmmac_set_bfsize(int mtu, int bufsize)
1020 {
1021         int ret = bufsize;
1022
1023         if (mtu >= BUF_SIZE_4KiB)
1024                 ret = BUF_SIZE_8KiB;
1025         else if (mtu >= BUF_SIZE_2KiB)
1026                 ret = BUF_SIZE_4KiB;
1027         else if (mtu > DEFAULT_BUFSIZE)
1028                 ret = BUF_SIZE_2KiB;
1029         else
1030                 ret = DEFAULT_BUFSIZE;
1031
1032         return ret;
1033 }
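
/* For example (illustrative MTUs): a standard 1500-byte MTU keeps
 * DEFAULT_BUFSIZE (1536), a 3000-byte MTU selects BUF_SIZE_4KiB and a
 * 9000-byte jumbo frame MTU selects BUF_SIZE_8KiB.
 */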
1034
1035 /**
1036  * stmmac_clear_rx_descriptors - clear RX descriptors
1037  * @priv: driver private structure
1038  * @queue: RX queue index
1039  * Description: this function is called to clear the RX descriptors
1040  * whether basic or extended descriptors are used.
1041  */
1042 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1043 {
1044         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1045         int i;
1046
1047         /* Clear the RX descriptors */
1048         for (i = 0; i < DMA_RX_SIZE; i++)
1049                 if (priv->extend_desc)
1050                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1051                                                      priv->use_riwt, priv->mode,
1052                                                      (i == DMA_RX_SIZE - 1));
1053                 else
1054                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1055                                                      priv->use_riwt, priv->mode,
1056                                                      (i == DMA_RX_SIZE - 1));
1057 }
1058
1059 /**
1060  * stmmac_clear_tx_descriptors - clear tx descriptors
1061  * @priv: driver private structure
1062  * @queue: TX queue index.
1063  * Description: this function is called to clear the TX descriptors
1064  * whether basic or extended descriptors are used.
1065  */
1066 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1067 {
1068         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1069         int i;
1070
1071         /* Clear the TX descriptors */
1072         for (i = 0; i < DMA_TX_SIZE; i++)
1073                 if (priv->extend_desc)
1074                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1075                                                      priv->mode,
1076                                                      (i == DMA_TX_SIZE - 1));
1077                 else
1078                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1079                                                      priv->mode,
1080                                                      (i == DMA_TX_SIZE - 1));
1081 }
1082
1083 /**
1084  * stmmac_clear_descriptors - clear descriptors
1085  * @priv: driver private structure
1086  * Description: this function is called to clear the TX and RX descriptors
1087  * whether basic or extended descriptors are used.
1088  */
1089 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1090 {
1091         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1092         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1093         u32 queue;
1094
1095         /* Clear the RX descriptors */
1096         for (queue = 0; queue < rx_queue_cnt; queue++)
1097                 stmmac_clear_rx_descriptors(priv, queue);
1098
1099         /* Clear the TX descriptors */
1100         for (queue = 0; queue < tx_queue_cnt; queue++)
1101                 stmmac_clear_tx_descriptors(priv, queue);
1102 }
1103
1104 /**
1105  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1106  * @priv: driver private structure
1107  * @p: descriptor pointer
1108  * @i: descriptor index
1109  * @flags: gfp flag
1110  * @queue: RX queue index
1111  * Description: this function is called to allocate a receive buffer, perform
1112  * the DMA mapping and init the descriptor.
1113  */
1114 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1115                                   int i, gfp_t flags, u32 queue)
1116 {
1117         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1118         struct sk_buff *skb;
1119
1120         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1121         if (!skb) {
1122                 netdev_err(priv->dev,
1123                            "%s: Rx init fails; skb is NULL\n", __func__);
1124                 return -ENOMEM;
1125         }
1126         rx_q->rx_skbuff[i] = skb;
1127         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1128                                                 priv->dma_buf_sz,
1129                                                 DMA_FROM_DEVICE);
1130         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1131                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1132                 dev_kfree_skb_any(skb);
1133                 return -EINVAL;
1134         }
1135
1136         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1137                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1138         else
1139                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1140
1141         if ((priv->hw->mode->init_desc3) &&
1142             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1143                 priv->hw->mode->init_desc3(p);
1144
1145         return 0;
1146 }
1147
1148 /**
1149  * stmmac_free_rx_buffer - free an RX dma buffer
1150  * @priv: private structure
1151  * @queue: RX queue index
1152  * @i: buffer index.
1153  */
1154 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1155 {
1156         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1157
1158         if (rx_q->rx_skbuff[i]) {
1159                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1160                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1161                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1162         }
1163         rx_q->rx_skbuff[i] = NULL;
1164 }
1165
1166 /**
1167  * stmmac_free_tx_buffer - free a TX dma buffer
1168  * @priv: private structure
1169  * @queue: TX queue index
1170  * @i: buffer index.
1171  */
1172 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1173 {
1174         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1175
1176         if (tx_q->tx_skbuff_dma[i].buf) {
1177                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1178                         dma_unmap_page(priv->device,
1179                                        tx_q->tx_skbuff_dma[i].buf,
1180                                        tx_q->tx_skbuff_dma[i].len,
1181                                        DMA_TO_DEVICE);
1182                 else
1183                         dma_unmap_single(priv->device,
1184                                          tx_q->tx_skbuff_dma[i].buf,
1185                                          tx_q->tx_skbuff_dma[i].len,
1186                                          DMA_TO_DEVICE);
1187         }
1188
1189         if (tx_q->tx_skbuff[i]) {
1190                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1191                 tx_q->tx_skbuff[i] = NULL;
1192                 tx_q->tx_skbuff_dma[i].buf = 0;
1193                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1194         }
1195 }
1196
1197 /**
1198  * init_dma_rx_desc_rings - init the RX descriptor rings
1199  * @dev: net device structure
1200  * @flags: gfp flag.
1201  * Description: this function initializes the DMA RX descriptors
1202  * and allocates the socket buffers. It supports the chained and ring
1203  * modes.
1204  */
1205 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1206 {
1207         struct stmmac_priv *priv = netdev_priv(dev);
1208         u32 rx_count = priv->plat->rx_queues_to_use;
1209         unsigned int bfsize = 0;
1210         int ret = -ENOMEM;
1211         u32 queue;
1212         int i;
1213
1214         if (priv->hw->mode->set_16kib_bfsize)
1215                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1216
1217         if (bfsize < BUF_SIZE_16KiB)
1218                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1219
1220         priv->dma_buf_sz = bfsize;
1221
1222         /* RX INITIALIZATION */
1223         netif_dbg(priv, probe, priv->dev,
1224                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1225
1226         for (queue = 0; queue < rx_count; queue++) {
1227                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1228
1229                 netif_dbg(priv, probe, priv->dev,
1230                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1231                           (u32)rx_q->dma_rx_phy);
1232
1233                 for (i = 0; i < DMA_RX_SIZE; i++) {
1234                         struct dma_desc *p;
1235
1236                         if (priv->extend_desc)
1237                                 p = &((rx_q->dma_erx + i)->basic);
1238                         else
1239                                 p = rx_q->dma_rx + i;
1240
1241                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1242                                                      queue);
1243                         if (ret)
1244                                 goto err_init_rx_buffers;
1245
1246                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1247                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1248                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1249                 }
1250
1251                 rx_q->cur_rx = 0;
1252                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1253
1254                 stmmac_clear_rx_descriptors(priv, queue);
1255
1256                 /* Setup the chained descriptor addresses */
1257                 if (priv->mode == STMMAC_CHAIN_MODE) {
1258                         if (priv->extend_desc)
1259                                 priv->hw->mode->init(rx_q->dma_erx,
1260                                                      rx_q->dma_rx_phy,
1261                                                      DMA_RX_SIZE, 1);
1262                         else
1263                                 priv->hw->mode->init(rx_q->dma_rx,
1264                                                      rx_q->dma_rx_phy,
1265                                                      DMA_RX_SIZE, 0);
1266                 }
1267         }
1268
1269         buf_sz = bfsize;
1270
1271         return 0;
1272
1273 err_init_rx_buffers:
1274         while (queue >= 0) {
1275                 while (--i >= 0)
1276                         stmmac_free_rx_buffer(priv, queue, i);
1277
1278                 if (queue == 0)
1279                         break;
1280
1281                 i = DMA_RX_SIZE;
1282                 queue--;
1283         }
1284
1285         return ret;
1286 }
1287
1288 /**
1289  * init_dma_tx_desc_rings - init the TX descriptor rings
1290  * @dev: net device structure.
1291  * Description: this function initializes the DMA TX descriptors
1292  * and allocates the socket buffers. It supports the chained and ring
1293  * modes.
1294  */
1295 static int init_dma_tx_desc_rings(struct net_device *dev)
1296 {
1297         struct stmmac_priv *priv = netdev_priv(dev);
1298         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1299         u32 queue;
1300         int i;
1301
1302         for (queue = 0; queue < tx_queue_cnt; queue++) {
1303                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1304
1305                 netif_dbg(priv, probe, priv->dev,
1306                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1307                          (u32)tx_q->dma_tx_phy);
1308
1309                 /* Setup the chained descriptor addresses */
1310                 if (priv->mode == STMMAC_CHAIN_MODE) {
1311                         if (priv->extend_desc)
1312                                 priv->hw->mode->init(tx_q->dma_etx,
1313                                                      tx_q->dma_tx_phy,
1314                                                      DMA_TX_SIZE, 1);
1315                         else
1316                                 priv->hw->mode->init(tx_q->dma_tx,
1317                                                      tx_q->dma_tx_phy,
1318                                                      DMA_TX_SIZE, 0);
1319                 }
1320
1321                 for (i = 0; i < DMA_TX_SIZE; i++) {
1322                         struct dma_desc *p;
1323                         if (priv->extend_desc)
1324                                 p = &((tx_q->dma_etx + i)->basic);
1325                         else
1326                                 p = tx_q->dma_tx + i;
1327
1328                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1329                                 p->des0 = 0;
1330                                 p->des1 = 0;
1331                                 p->des2 = 0;
1332                                 p->des3 = 0;
1333                         } else {
1334                                 p->des2 = 0;
1335                         }
1336
1337                         tx_q->tx_skbuff_dma[i].buf = 0;
1338                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1339                         tx_q->tx_skbuff_dma[i].len = 0;
1340                         tx_q->tx_skbuff_dma[i].last_segment = false;
1341                         tx_q->tx_skbuff[i] = NULL;
1342                 }
1343
1344                 tx_q->dirty_tx = 0;
1345                 tx_q->cur_tx = 0;
1346
1347                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1348         }
1349
1350         return 0;
1351 }
1352
1353 /**
1354  * init_dma_desc_rings - init the RX/TX descriptor rings
1355  * @dev: net device structure
1356  * @flags: gfp flag.
1357  * Description: this function initializes the DMA RX/TX descriptors
1358  * and allocates the socket buffers. It supports the chained and ring
1359  * modes.
1360  */
1361 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1362 {
1363         struct stmmac_priv *priv = netdev_priv(dev);
1364         int ret;
1365
1366         ret = init_dma_rx_desc_rings(dev, flags);
1367         if (ret)
1368                 return ret;
1369
1370         ret = init_dma_tx_desc_rings(dev);
1371
1372         stmmac_clear_descriptors(priv);
1373
1374         if (netif_msg_hw(priv))
1375                 stmmac_display_rings(priv);
1376
1377         return ret;
1378 }
1379
1380 /**
1381  * dma_free_rx_skbufs - free RX dma buffers
1382  * @priv: private structure
1383  * @queue: RX queue index
1384  */
1385 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1386 {
1387         int i;
1388
1389         for (i = 0; i < DMA_RX_SIZE; i++)
1390                 stmmac_free_rx_buffer(priv, queue, i);
1391 }
1392
1393 /**
1394  * dma_free_tx_skbufs - free TX dma buffers
1395  * @priv: private structure
1396  * @queue: TX queue index
1397  */
1398 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1399 {
1400         int i;
1401
1402         for (i = 0; i < DMA_TX_SIZE; i++)
1403                 stmmac_free_tx_buffer(priv, queue, i);
1404 }
1405
1406 /**
1407  * free_dma_rx_desc_resources - free RX dma desc resources
1408  * @priv: private structure
1409  */
1410 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1411 {
1412         u32 rx_count = priv->plat->rx_queues_to_use;
1413         u32 queue;
1414
1415         /* Free RX queue resources */
1416         for (queue = 0; queue < rx_count; queue++) {
1417                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1418
1419                 /* Release the DMA RX socket buffers */
1420                 dma_free_rx_skbufs(priv, queue);
1421
1422                 /* Free DMA regions of consistent memory previously allocated */
1423                 if (!priv->extend_desc)
1424                         dma_free_coherent(priv->device,
1425                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1426                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1427                 else
1428                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1429                                           sizeof(struct dma_extended_desc),
1430                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1431
1432                 kfree(rx_q->rx_skbuff_dma);
1433                 kfree(rx_q->rx_skbuff);
1434         }
1435 }
1436
1437 /**
1438  * free_dma_tx_desc_resources - free TX dma desc resources
1439  * @priv: private structure
1440  */
1441 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1442 {
1443         u32 tx_count = priv->plat->tx_queues_to_use;
1444         u32 queue = 0;
1445
1446         /* Free TX queue resources */
1447         for (queue = 0; queue < tx_count; queue++) {
1448                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1449
1450                 /* Release the DMA TX socket buffers */
1451                 dma_free_tx_skbufs(priv, queue);
1452
1453                 /* Free DMA regions of consistent memory previously allocated */
1454                 if (!priv->extend_desc)
1455                         dma_free_coherent(priv->device,
1456                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1457                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1458                 else
1459                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1460                                           sizeof(struct dma_extended_desc),
1461                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1462
1463                 kfree(tx_q->tx_skbuff_dma);
1464                 kfree(tx_q->tx_skbuff);
1465         }
1466 }
1467
1468 /**
1469  * alloc_dma_rx_desc_resources - alloc RX resources.
1470  * @priv: private structure
1471  * Description: according to which descriptor can be used (extended or
1472  * basic) this function allocates the resources for the RX path. For
1473  * reception it pre-allocates the RX socket buffers in order to allow the
1474  * zero-copy mechanism.
1475  */
1476 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1477 {
1478         u32 rx_count = priv->plat->rx_queues_to_use;
1479         int ret = -ENOMEM;
1480         u32 queue;
1481
1482         /* RX queues buffers and DMA */
1483         for (queue = 0; queue < rx_count; queue++) {
1484                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1485
1486                 rx_q->queue_index = queue;
1487                 rx_q->priv_data = priv;
1488
1489                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1490                                                     sizeof(dma_addr_t),
1491                                                     GFP_KERNEL);
1492                 if (!rx_q->rx_skbuff_dma)
1493                         return -ENOMEM;
1494
1495                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1496                                                 sizeof(struct sk_buff *),
1497                                                 GFP_KERNEL);
1498                 if (!rx_q->rx_skbuff)
1499                         goto err_dma;
1500
1501                 if (priv->extend_desc) {
1502                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1503                                                             DMA_RX_SIZE *
1504                                                             sizeof(struct
1505                                                             dma_extended_desc),
1506                                                             &rx_q->dma_rx_phy,
1507                                                             GFP_KERNEL);
1508                         if (!rx_q->dma_erx)
1509                                 goto err_dma;
1510
1511                 } else {
1512                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1513                                                            DMA_RX_SIZE *
1514                                                            sizeof(struct
1515                                                            dma_desc),
1516                                                            &rx_q->dma_rx_phy,
1517                                                            GFP_KERNEL);
1518                         if (!rx_q->dma_rx)
1519                                 goto err_dma;
1520                 }
1521         }
1522
1523         return 0;
1524
1525 err_dma:
1526         free_dma_rx_desc_resources(priv);
1527
1528         return ret;
1529 }
1530
1531 /**
1532  * alloc_dma_tx_desc_resources - alloc TX resources.
1533  * @priv: private structure
1534  * Description: according to which descriptor can be used (extended or
1535  * basic) this function allocates the resources for the TX path: the TX
1536  * descriptor rings and the per-descriptor buffer bookkeeping used when
1537  * transmitting.
1538  */
1539 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1540 {
1541         u32 tx_count = priv->plat->tx_queues_to_use;
1542         int ret = -ENOMEM;
1543         u32 queue;
1544
1545         /* TX queues buffers and DMA */
1546         for (queue = 0; queue < tx_count; queue++) {
1547                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1548
1549                 tx_q->queue_index = queue;
1550                 tx_q->priv_data = priv;
1551
1552                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1553                                                     sizeof(*tx_q->tx_skbuff_dma),
1554                                                     GFP_KERNEL);
1555                 if (!tx_q->tx_skbuff_dma)
1556                         return -ENOMEM;
1557
1558                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1559                                                 sizeof(struct sk_buff *),
1560                                                 GFP_KERNEL);
1561                 if (!tx_q->tx_skbuff)
1562                         goto err_dma_buffers;
1563
1564                 if (priv->extend_desc) {
1565                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1566                                                             DMA_TX_SIZE *
1567                                                             sizeof(struct
1568                                                             dma_extended_desc),
1569                                                             &tx_q->dma_tx_phy,
1570                                                             GFP_KERNEL);
1571                         if (!tx_q->dma_etx)
1572                                 goto err_dma_buffers;
1573                 } else {
1574                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1575                                                            DMA_TX_SIZE *
1576                                                            sizeof(struct
1577                                                                   dma_desc),
1578                                                            &tx_q->dma_tx_phy,
1579                                                            GFP_KERNEL);
1580                         if (!tx_q->dma_tx)
1581                                 goto err_dma_buffers;
1582                 }
1583         }
1584
1585         return 0;
1586
1587 err_dma_buffers:
1588         free_dma_tx_desc_resources(priv);
1589
1590         return ret;
1591 }
1592
1593 /**
1594  * alloc_dma_desc_resources - alloc TX/RX resources.
1595  * @priv: private structure
1596  * Description: according to which descriptor can be used (extended or
1597  * basic) this function allocates the resources for both the TX and RX
1598  * paths. In case of reception, for example, it pre-allocates the RX
1599  * socket buffers in order to allow the zero-copy mechanism.
1600  */
1601 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1602 {
1603         /* RX Allocation */
1604         int ret = alloc_dma_rx_desc_resources(priv);
1605
1606         if (ret)
1607                 return ret;
1608
1609         ret = alloc_dma_tx_desc_resources(priv);
1610
1611         return ret;
1612 }
1613
1614 /**
1615  * free_dma_desc_resources - free dma desc resources
1616  * @priv: private structure
1617  */
1618 static void free_dma_desc_resources(struct stmmac_priv *priv)
1619 {
1620         /* Release the DMA RX socket buffers */
1621         free_dma_rx_desc_resources(priv);
1622
1623         /* Release the DMA TX socket buffers */
1624         free_dma_tx_desc_resources(priv);
1625 }
1626
1627 /**
1628  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1629  *  @priv: driver private structure
1630  *  Description: It is used for enabling the rx queues in the MAC
1631  */
1632 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1633 {
1634         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1635         int queue;
1636         u8 mode;
1637
1638         for (queue = 0; queue < rx_queues_count; queue++) {
1639                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1640                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1641         }
1642 }
1643
1644 /**
1645  * stmmac_start_rx_dma - start RX DMA channel
1646  * @priv: driver private structure
1647  * @chan: RX channel index
1648  * Description:
1649  * This starts a RX DMA channel
1650  */
1651 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1652 {
1653         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1654         priv->hw->dma->start_rx(priv->ioaddr, chan);
1655 }
1656
1657 /**
1658  * stmmac_start_tx_dma - start TX DMA channel
1659  * @priv: driver private structure
1660  * @chan: TX channel index
1661  * Description:
1662  * This starts a TX DMA channel
1663  */
1664 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1665 {
1666         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1667         priv->hw->dma->start_tx(priv->ioaddr, chan);
1668 }
1669
1670 /**
1671  * stmmac_stop_rx_dma - stop RX DMA channel
1672  * @priv: driver private structure
1673  * @chan: RX channel index
1674  * Description:
1675  * This stops a RX DMA channel
1676  */
1677 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1678 {
1679         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1680         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1681 }
1682
1683 /**
1684  * stmmac_stop_tx_dma - stop TX DMA channel
1685  * @priv: driver private structure
1686  * @chan: TX channel index
1687  * Description:
1688  * This stops a TX DMA channel
1689  */
1690 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1691 {
1692         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1693         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1694 }
1695
1696 /**
1697  * stmmac_start_all_dma - start all RX and TX DMA channels
1698  * @priv: driver private structure
1699  * Description:
1700  * This starts all the RX and TX DMA channels
1701  */
1702 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1703 {
1704         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1705         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1706         u32 chan = 0;
1707
1708         for (chan = 0; chan < rx_channels_count; chan++)
1709                 stmmac_start_rx_dma(priv, chan);
1710
1711         for (chan = 0; chan < tx_channels_count; chan++)
1712                 stmmac_start_tx_dma(priv, chan);
1713 }
1714
1715 /**
1716  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1717  * @priv: driver private structure
1718  * Description:
1719  * This stops the RX and TX DMA channels
1720  */
1721 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1722 {
1723         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1724         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1725         u32 chan = 0;
1726
1727         for (chan = 0; chan < rx_channels_count; chan++)
1728                 stmmac_stop_rx_dma(priv, chan);
1729
1730         for (chan = 0; chan < tx_channels_count; chan++)
1731                 stmmac_stop_tx_dma(priv, chan);
1732 }
1733
1734 /**
1735  *  stmmac_dma_operation_mode - HW DMA operation mode
1736  *  @priv: driver private structure
1737  *  Description: it is used for configuring the DMA operation mode register in
1738  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1739  */
1740 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1741 {
1742         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1743         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1744         int rxfifosz = priv->plat->rx_fifo_size;
1745         u32 txmode = 0;
1746         u32 rxmode = 0;
1747         u32 chan = 0;
1748
1749         if (rxfifosz == 0)
1750                 rxfifosz = priv->dma_cap.rx_fifo_size;
1751
1752         if (priv->plat->force_thresh_dma_mode) {
1753                 txmode = tc;
1754                 rxmode = tc;
1755         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1756                 /*
1757                  * In case of GMAC, SF mode can be enabled
1758                  * to perform the TX COE in HW. This depends on:
1759                  * 1) TX COE actually being supported
1760                  * 2) there being no buggy Jumbo frame support
1761                  *    that requires not inserting the csum in the TDES.
1762                  */
1763                 txmode = SF_DMA_MODE;
1764                 rxmode = SF_DMA_MODE;
1765                 priv->xstats.threshold = SF_DMA_MODE;
1766         } else {
1767                 txmode = tc;
1768                 rxmode = SF_DMA_MODE;
1769         }
1770
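        /*
         * Informal summary of the above: forced threshold mode uses the "tc"
         * module parameter in both directions, forced SF (or HW TX COE) uses
         * Store-And-Forward in both, and the default is threshold mode on TX
         * with Store-And-Forward on RX.
         */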
1771         /* configure all channels */
1772         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1773                 for (chan = 0; chan < rx_channels_count; chan++)
1774                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1775                                                    rxfifosz);
1776
1777                 for (chan = 0; chan < tx_channels_count; chan++)
1778                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1779         } else {
1780                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1781                                         rxfifosz);
1782         }
1783 }
1784
1785 /**
1786  * stmmac_tx_clean - to manage the transmission completion
1787  * @priv: driver private structure
1788  * @queue: TX queue index
1789  * Description: it reclaims the transmit resources after transmission completes.
1790  */
1791 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1792 {
1793         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1794         unsigned int bytes_compl = 0, pkts_compl = 0;
1795         unsigned int entry = tx_q->dirty_tx;
1796
1797         netif_tx_lock(priv->dev);
1798
1799         priv->xstats.tx_clean++;
1800
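        /*
         * Reclaim descriptors from the last cleaned position (dirty_tx) up to
         * the next entry xmit will use (cur_tx), stopping as soon as one is
         * still owned by the DMA.
         */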
1801         while (entry != tx_q->cur_tx) {
1802                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1803                 struct dma_desc *p;
1804                 int status;
1805
1806                 if (priv->extend_desc)
1807                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1808                 else
1809                         p = tx_q->dma_tx + entry;
1810
1811                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1812                                                       &priv->xstats, p,
1813                                                       priv->ioaddr);
1814                 /* Check if the descriptor is owned by the DMA */
1815                 if (unlikely(status & tx_dma_own))
1816                         break;
1817
1818                 /* Just consider the last segment and ...*/
1819                 if (likely(!(status & tx_not_ls))) {
1820                         /* ... verify the status error condition */
1821                         if (unlikely(status & tx_err)) {
1822                                 priv->dev->stats.tx_errors++;
1823                         } else {
1824                                 priv->dev->stats.tx_packets++;
1825                                 priv->xstats.tx_pkt_n++;
1826                         }
1827                         stmmac_get_tx_hwtstamp(priv, p, skb);
1828                 }
1829
1830                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1831                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1832                                 dma_unmap_page(priv->device,
1833                                                tx_q->tx_skbuff_dma[entry].buf,
1834                                                tx_q->tx_skbuff_dma[entry].len,
1835                                                DMA_TO_DEVICE);
1836                         else
1837                                 dma_unmap_single(priv->device,
1838                                                  tx_q->tx_skbuff_dma[entry].buf,
1839                                                  tx_q->tx_skbuff_dma[entry].len,
1840                                                  DMA_TO_DEVICE);
1841                         tx_q->tx_skbuff_dma[entry].buf = 0;
1842                         tx_q->tx_skbuff_dma[entry].len = 0;
1843                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1844                 }
1845
1846                 if (priv->hw->mode->clean_desc3)
1847                         priv->hw->mode->clean_desc3(tx_q, p);
1848
1849                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1850                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1851
1852                 if (likely(skb != NULL)) {
1853                         pkts_compl++;
1854                         bytes_compl += skb->len;
1855                         dev_consume_skb_any(skb);
1856                         tx_q->tx_skbuff[entry] = NULL;
1857                 }
1858
1859                 priv->hw->desc->release_tx_desc(p, priv->mode);
1860
1861                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1862         }
1863         tx_q->dirty_tx = entry;
1864
1865         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1866                                   pkts_compl, bytes_compl);
1867
1868         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1869                                                                 queue))) &&
1870             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1871
1872                 netif_dbg(priv, tx_done, priv->dev,
1873                           "%s: restart transmit\n", __func__);
1874                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1875         }
1876
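        /*
         * With EEE enabled, re-arm the LPI timer after cleaning so the MAC
         * can enter low-power idle if the TX path stays quiet.
         */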
1877         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1878                 stmmac_enable_eee_mode(priv);
1879                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1880         }
1881         netif_tx_unlock(priv->dev);
1882 }
1883
1884 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1885 {
1886         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1887 }
1888
1889 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1890 {
1891         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1892 }
1893
1894 /**
1895  * stmmac_tx_err - to manage the tx error
1896  * @priv: driver private structure
1897  * @chan: channel index
1898  * Description: it cleans the descriptors and restarts the transmission
1899  * in case of transmission errors.
1900  */
1901 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1902 {
1903         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1904         int i;
1905
1906         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1907
1908         stmmac_stop_tx_dma(priv, chan);
1909         dma_free_tx_skbufs(priv, chan);
1910         for (i = 0; i < DMA_TX_SIZE; i++)
1911                 if (priv->extend_desc)
1912                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1913                                                      priv->mode,
1914                                                      (i == DMA_TX_SIZE - 1));
1915                 else
1916                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1917                                                      priv->mode,
1918                                                      (i == DMA_TX_SIZE - 1));
1919         tx_q->dirty_tx = 0;
1920         tx_q->cur_tx = 0;
1921         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1922         stmmac_start_tx_dma(priv, chan);
1923
1924         priv->dev->stats.tx_errors++;
1925         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1926 }
1927
1928 /**
1929  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1930  *  @priv: driver private structure
1931  *  @txmode: TX operating mode
1932  *  @rxmode: RX operating mode
1933  *  @chan: channel index
1934  *  Description: it is used for configuring of the DMA operation mode in
1935  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1936  *  mode.
1937  */
1938 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1939                                           u32 rxmode, u32 chan)
1940 {
1941         int rxfifosz = priv->plat->rx_fifo_size;
1942
1943         if (rxfifosz == 0)
1944                 rxfifosz = priv->dma_cap.rx_fifo_size;
1945
1946         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1947                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1948                                            rxfifosz);
1949                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1950         } else {
1951                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1952                                         rxfifosz);
1953         }
1954 }
1955
1956 /**
1957  * stmmac_dma_interrupt - DMA ISR
1958  * @priv: driver private structure
1959  * Description: this is the DMA ISR. It is called by the main ISR.
1960  * It calls the dwmac dma routine and schedule poll method in case of some
1961  * It calls the dwmac dma routine and schedules the poll method when some
1962  * work can be done.
1963 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1964 {
1965         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1966         int status;
1967         u32 chan;
1968
1969         for (chan = 0; chan < tx_channel_count; chan++) {
1970                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1971
1972                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1973                                                       &priv->xstats, chan);
1974                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1975                         if (likely(napi_schedule_prep(&rx_q->napi))) {
1976                                 stmmac_disable_dma_irq(priv, chan);
1977                                 __napi_schedule(&rx_q->napi);
1978                         }
1979                 }
1980
1981                 if (unlikely(status & tx_hard_error_bump_tc)) {
1982                         /* Try to bump up the dma threshold on this failure */
1983                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1984                             (tc <= 256)) {
1985                                 tc += 64;
1986                                 if (priv->plat->force_thresh_dma_mode)
1987                                         stmmac_set_dma_operation_mode(priv,
1988                                                                       tc,
1989                                                                       tc,
1990                                                                       chan);
1991                                 else
1992                                         stmmac_set_dma_operation_mode(priv,
1993                                                                     tc,
1994                                                                     SF_DMA_MODE,
1995                                                                     chan);
1996                                 priv->xstats.threshold = tc;
1997                         }
1998                 } else if (unlikely(status == tx_hard_error)) {
1999                         stmmac_tx_err(priv, chan);
2000                 }
2001         }
2002 }
2003
2004 /**
2005  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2006  * @priv: driver private structure
2007  * Description: this masks the MMC irq since the counters are managed in SW.
2008  */
2009 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2010 {
2011         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2012                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2013
2014         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2015                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2016                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2017         } else {
2018                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2019                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2020         }
2021
2022         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2023
2024         if (priv->dma_cap.rmon) {
2025                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2026                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2027         } else
2028                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2029 }
2030
2031 /**
2032  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
2033  * @priv: driver private structure
2034  * Description: select the Enhanced/Alternate or Normal descriptors.
2035  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2036  * supported by the HW capability register.
2037  */
2038 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2039 {
2040         if (priv->plat->enh_desc) {
2041                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2042
2043                 /* GMAC older than 3.50 has no extended descriptors */
2044                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2045                         dev_info(priv->device, "Enabled extended descriptors\n");
2046                         priv->extend_desc = 1;
2047                 } else
2048                         dev_warn(priv->device, "Extended descriptors not supported\n");
2049
2050                 priv->hw->desc = &enh_desc_ops;
2051         } else {
2052                 dev_info(priv->device, "Normal descriptors\n");
2053                 priv->hw->desc = &ndesc_ops;
2054         }
2055 }
2056
2057 /**
2058  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2059  * @priv: driver private structure
2060  * Description:
2061  *  new GMAC chip generations have a new register to indicate the
2062  *  presence of the optional features/functions.
2063  *  This can also be used to override the values passed through the
2064  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2065  */
2066 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2067 {
2068         u32 ret = 0;
2069
2070         if (priv->hw->dma->get_hw_feature) {
2071                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2072                                               &priv->dma_cap);
2073                 ret = 1;
2074         }
2075
2076         return ret;
2077 }
2078
2079 /**
2080  * stmmac_check_ether_addr - check if the MAC addr is valid
2081  * @priv: driver private structure
2082  * Description:
2083  * it verifies that the MAC address is valid; in case it is not, it
2084  * generates a random MAC address
2085  */
2086 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2087 {
2088         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2089                 priv->hw->mac->get_umac_addr(priv->hw,
2090                                              priv->dev->dev_addr, 0);
2091                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2092                         eth_hw_addr_random(priv->dev);
2093                 netdev_info(priv->dev, "device MAC address %pM\n",
2094                             priv->dev->dev_addr);
2095         }
2096 }
2097
2098 /**
2099  * stmmac_init_dma_engine - DMA init.
2100  * @priv: driver private structure
2101  * Description:
2102  * It inits the DMA invoking the specific MAC/GMAC callback.
2103  * Some DMA parameters can be passed from the platform;
2104  * in case these are not passed, a default is kept for the MAC or GMAC.
2105  */
2106 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2107 {
2108         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2109         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2110         struct stmmac_rx_queue *rx_q;
2111         struct stmmac_tx_queue *tx_q;
2112         u32 dummy_dma_rx_phy = 0;
2113         u32 dummy_dma_tx_phy = 0;
2114         u32 chan = 0;
2115         int atds = 0;
2116         int ret = 0;
2117
2118         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2119                 dev_err(priv->device, "Invalid DMA configuration\n");
2120                 return -EINVAL;
2121         }
2122
2123         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2124                 atds = 1;
2125
2126         ret = priv->hw->dma->reset(priv->ioaddr);
2127         if (ret) {
2128                 dev_err(priv->device, "Failed to reset the dma\n");
2129                 return ret;
2130         }
2131
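        /*
         * GMAC4 and newer cores program each DMA channel separately,
         * including per-channel descriptor base and tail pointers; older
         * cores take a single TX/RX descriptor base pair in one init call.
         */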
2132         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2133                 /* DMA Configuration */
2134                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2135                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2136
2137                 /* DMA RX Channel Configuration */
2138                 for (chan = 0; chan < rx_channels_count; chan++) {
2139                         rx_q = &priv->rx_queue[chan];
2140
2141                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2142                                                     priv->plat->dma_cfg,
2143                                                     rx_q->dma_rx_phy, chan);
2144
2145                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2146                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2147                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2148                                                        rx_q->rx_tail_addr,
2149                                                        chan);
2150                 }
2151
2152                 /* DMA TX Channel Configuration */
2153                 for (chan = 0; chan < tx_channels_count; chan++) {
2154                         tx_q = &priv->tx_queue[chan];
2155
2156                         priv->hw->dma->init_chan(priv->ioaddr,
2157                                                  priv->plat->dma_cfg,
2158                                                  chan);
2159
2160                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2161                                                     priv->plat->dma_cfg,
2162                                                     tx_q->dma_tx_phy, chan);
2163
2164                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2165                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2166                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2167                                                        tx_q->tx_tail_addr,
2168                                                        chan);
2169                 }
2170         } else {
2171                 rx_q = &priv->rx_queue[chan];
2172                 tx_q = &priv->tx_queue[chan];
2173                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2174                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2175         }
2176
2177         if (priv->plat->axi && priv->hw->dma->axi)
2178                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2179
2180         return ret;
2181 }
2182
2183 /**
2184  * stmmac_tx_timer - mitigation sw timer for tx.
2185  * @data: data pointer
2186  * Description:
2187  * This is the timer handler used to directly invoke stmmac_tx_clean().
2188  */
2189 static void stmmac_tx_timer(unsigned long data)
2190 {
2191         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2192         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2193         u32 queue;
2194
2195         /* let's scan all the tx queues */
2196         for (queue = 0; queue < tx_queues_count; queue++)
2197                 stmmac_tx_clean(priv, queue);
2198 }
2199
2200 /**
2201  * stmmac_init_tx_coalesce - init tx mitigation options.
2202  * @priv: driver private structure
2203  * Description:
2204  * This inits the transmit coalesce parameters: i.e. timer rate,
2205  * timer handler and default threshold used for enabling the
2206  * interrupt on completion bit.
2207  */
2208 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2209 {
2210         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2211         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
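        /*
         * TX completion interrupts are only requested every tx_coal_frames
         * descriptors, so this timer periodically invokes stmmac_tx_clean()
         * to reclaim buffers when traffic is too light to hit the frame
         * threshold.
         */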
2212         init_timer(&priv->txtimer);
2213         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2214         priv->txtimer.data = (unsigned long)priv;
2215         priv->txtimer.function = stmmac_tx_timer;
2216         add_timer(&priv->txtimer);
2217 }
2218
2219 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2220 {
2221         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2222         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2223         u32 chan;
2224
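        /*
         * The callbacks below are passed DMA_{TX,RX}_SIZE - 1, i.e. the
         * highest valid descriptor index of each ring.
         */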
2225         /* set TX ring length */
2226         if (priv->hw->dma->set_tx_ring_len) {
2227                 for (chan = 0; chan < tx_channels_count; chan++)
2228                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2229                                                        (DMA_TX_SIZE - 1), chan);
2230         }
2231
2232         /* set RX ring length */
2233         if (priv->hw->dma->set_rx_ring_len) {
2234                 for (chan = 0; chan < rx_channels_count; chan++)
2235                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2236                                                        (DMA_RX_SIZE - 1), chan);
2237         }
2238 }
2239
2240 /**
2241  *  stmmac_set_tx_queue_weight - Set TX queue weight
2242  *  @priv: driver private structure
2243  *  Description: It is used for setting the TX queue weights
2244  */
2245 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2246 {
2247         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2248         u32 weight;
2249         u32 queue;
2250
2251         for (queue = 0; queue < tx_queues_count; queue++) {
2252                 weight = priv->plat->tx_queues_cfg[queue].weight;
2253                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2254         }
2255 }
2256
2257 /**
2258  *  stmmac_configure_cbs - Configure CBS in TX queue
2259  *  @priv: driver private structure
2260  *  Description: It is used for configuring CBS in AVB TX queues
2261  */
2262 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2263 {
2264         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2265         u32 mode_to_use;
2266         u32 queue;
2267
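        /*
         * CBS (the IEEE 802.1Qav Credit Based Shaper) is configured per queue
         * from the platform data: idle_slope/send_slope set the shaping rate
         * and high_credit/low_credit bound the accumulated credit.
         */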
2268         /* queue 0 is reserved for legacy traffic */
2269         for (queue = 1; queue < tx_queues_count; queue++) {
2270                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2271                 if (mode_to_use == MTL_QUEUE_DCB)
2272                         continue;
2273
2274                 priv->hw->mac->config_cbs(priv->hw,
2275                                 priv->plat->tx_queues_cfg[queue].send_slope,
2276                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2277                                 priv->plat->tx_queues_cfg[queue].high_credit,
2278                                 priv->plat->tx_queues_cfg[queue].low_credit,
2279                                 queue);
2280         }
2281 }
2282
2283 /**
2284  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2285  *  @priv: driver private structure
2286  *  Description: It is used for mapping RX queues to RX dma channels
2287  */
2288 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2289 {
2290         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2291         u32 queue;
2292         u32 chan;
2293
2294         for (queue = 0; queue < rx_queues_count; queue++) {
2295                 chan = priv->plat->rx_queues_cfg[queue].chan;
2296                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2297         }
2298 }
2299
2300 /**
2301  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2302  *  @priv: driver private structure
2303  *  Description: It is used for configuring the RX Queue Priority
2304  */
2305 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2306 {
2307         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2308         u32 queue;
2309         u32 prio;
2310
2311         for (queue = 0; queue < rx_queues_count; queue++) {
2312                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2313                         continue;
2314
2315                 prio = priv->plat->rx_queues_cfg[queue].prio;
2316                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2317         }
2318 }
2319
2320 /**
2321  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2322  *  @priv: driver private structure
2323  *  Description: It is used for configuring the TX Queue Priority
2324  */
2325 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2326 {
2327         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2328         u32 queue;
2329         u32 prio;
2330
2331         for (queue = 0; queue < tx_queues_count; queue++) {
2332                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2333                         continue;
2334
2335                 prio = priv->plat->tx_queues_cfg[queue].prio;
2336                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2337         }
2338 }
2339
2340 /**
2341  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2342  *  @priv: driver private structure
2343  *  Description: It is used for configuring the RX queue routing
2344  */
2345 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2346 {
2347         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2348         u32 queue;
2349         u8 packet;
2350
2351         for (queue = 0; queue < rx_queues_count; queue++) {
2352                 /* no specific packet type routing specified for the queue */
2353                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2354                         continue;
2355
2356                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2357                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2358         }
2359 }
2360
2361 /**
2362  *  stmmac_mtl_configuration - Configure MTL
2363  *  @priv: driver private structure
2364  *  Description: It is used for configuring MTL
2365  */
2366 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2367 {
2368         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2369         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2370
2371         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2372                 stmmac_set_tx_queue_weight(priv);
2373
2374         /* Configure MTL RX algorithms */
2375         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2376                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2377                                                 priv->plat->rx_sched_algorithm);
2378
2379         /* Configure MTL TX algorithms */
2380         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2381                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2382                                                 priv->plat->tx_sched_algorithm);
2383
2384         /* Configure CBS in AVB TX queues */
2385         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2386                 stmmac_configure_cbs(priv);
2387
2388         /* Map RX MTL to DMA channels */
2389         if (priv->hw->mac->map_mtl_to_dma)
2390                 stmmac_rx_queue_dma_chan_map(priv);
2391
2392         /* Enable MAC RX Queues */
2393         if (priv->hw->mac->rx_queue_enable)
2394                 stmmac_mac_enable_rx_queues(priv);
2395
2396         /* Set RX priorities */
2397         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2398                 stmmac_mac_config_rx_queues_prio(priv);
2399
2400         /* Set TX priorities */
2401         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2402                 stmmac_mac_config_tx_queues_prio(priv);
2403
2404         /* Set RX routing */
2405         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2406                 stmmac_mac_config_rx_queues_routing(priv);
2407 }
2408
2409 /**
2410  * stmmac_hw_setup - setup the MAC in a usable state.
2411  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize the PTP clock and timestamping support if set
2412  *  Description:
2413  *  this is the main function to setup the HW in a usable state: the
2414  *  DMA engine is reset, the core registers are configured (e.g. AXI,
2415  *  checksum features, timers) and the DMA is made ready to start
2416  *  receiving and transmitting.
2417  *  Return value:
2418  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2419  *  file on failure.
2420  */
2421 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2422 {
2423         struct stmmac_priv *priv = netdev_priv(dev);
2424         u32 rx_cnt = priv->plat->rx_queues_to_use;
2425         u32 tx_cnt = priv->plat->tx_queues_to_use;
2426         u32 chan;
2427         int ret;
2428
2429         /* DMA initialization and SW reset */
2430         ret = stmmac_init_dma_engine(priv);
2431         if (ret < 0) {
2432                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2433                            __func__);
2434                 return ret;
2435         }
2436
2437         /* Copy the MAC addr into the HW  */
2438         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2439
2440         /* PS and related bits will be programmed according to the speed */
2441         if (priv->hw->pcs) {
2442                 int speed = priv->plat->mac_port_sel_speed;
2443
2444                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2445                     (speed == SPEED_1000)) {
2446                         priv->hw->ps = speed;
2447                 } else {
2448                         dev_warn(priv->device, "invalid port speed\n");
2449                         priv->hw->ps = 0;
2450                 }
2451         }
2452
2453         /* Initialize the MAC Core */
2454         priv->hw->mac->core_init(priv->hw, dev->mtu);
2455
2456         /* Initialize MTL*/
2457         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2458                 stmmac_mtl_configuration(priv);
2459
2460         ret = priv->hw->mac->rx_ipc(priv->hw);
2461         if (!ret) {
2462                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2463                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2464                 priv->hw->rx_csum = 0;
2465         }
2466
2467         /* Enable the MAC Rx/Tx */
2468         priv->hw->mac->set_mac(priv->ioaddr, true);
2469
2470         /* Set the HW DMA mode and the COE */
2471         stmmac_dma_operation_mode(priv);
2472
2473         stmmac_mmc_setup(priv);
2474
2475         if (init_ptp) {
2476                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2477                 if (ret < 0)
2478                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2479
2480                 ret = stmmac_init_ptp(priv);
2481                 if (ret == -EOPNOTSUPP)
2482                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2483                 else if (ret)
2484                         netdev_warn(priv->dev, "PTP init failed\n");
2485         }
2486
2487 #ifdef CONFIG_DEBUG_FS
2488         ret = stmmac_init_fs(dev);
2489         if (ret < 0)
2490                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2491                             __func__);
2492 #endif
2493         /* Start the ball rolling... */
2494         stmmac_start_all_dma(priv);
2495
2496         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2497
2498         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2499                 priv->rx_riwt = MAX_DMA_RIWT;
2500                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2501         }
2502
2503         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2504                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2505
2506         /* set TX and RX rings length */
2507         stmmac_set_rings_length(priv);
2508
2509         /* Enable TSO */
2510         if (priv->tso) {
2511                 for (chan = 0; chan < tx_cnt; chan++)
2512                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2513         }
2514
2515         return 0;
2516 }
2517
2518 static void stmmac_hw_teardown(struct net_device *dev)
2519 {
2520         struct stmmac_priv *priv = netdev_priv(dev);
2521
2522         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2523 }
2524
2525 /**
2526  *  stmmac_open - open entry point of the driver
2527  *  @dev : pointer to the device structure.
2528  *  Description:
2529  *  This function is the open entry point of the driver.
2530  *  Return value:
2531  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2532  *  file on failure.
2533  */
2534 static int stmmac_open(struct net_device *dev)
2535 {
2536         struct stmmac_priv *priv = netdev_priv(dev);
2537         int ret;
2538
2539         stmmac_check_ether_addr(priv);
2540
2541         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2542             priv->hw->pcs != STMMAC_PCS_TBI &&
2543             priv->hw->pcs != STMMAC_PCS_RTBI) {
2544                 ret = stmmac_init_phy(dev);
2545                 if (ret) {
2546                         netdev_err(priv->dev,
2547                                    "%s: Cannot attach to PHY (error: %d)\n",
2548                                    __func__, ret);
2549                         return ret;
2550                 }
2551         }
2552
2553         /* Extra statistics */
2554         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2555         priv->xstats.threshold = tc;
2556
2557         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2558         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2559
2560         ret = alloc_dma_desc_resources(priv);
2561         if (ret < 0) {
2562                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2563                            __func__);
2564                 goto dma_desc_error;
2565         }
2566
2567         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2568         if (ret < 0) {
2569                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2570                            __func__);
2571                 goto init_error;
2572         }
2573
2574         ret = stmmac_hw_setup(dev, true);
2575         if (ret < 0) {
2576                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2577                 goto init_error;
2578         }
2579
2580         stmmac_init_tx_coalesce(priv);
2581
2582         if (dev->phydev)
2583                 phy_start(dev->phydev);
2584
2585         /* Request the IRQ lines */
2586         ret = request_irq(dev->irq, stmmac_interrupt,
2587                           IRQF_SHARED, dev->name, dev);
2588         if (unlikely(ret < 0)) {
2589                 netdev_err(priv->dev,
2590                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2591                            __func__, dev->irq, ret);
2592                 goto irq_error;
2593         }
2594
2595         /* Request the Wake IRQ in case another line is used for WoL */
2596         if (priv->wol_irq != dev->irq) {
2597                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2598                                   IRQF_SHARED, dev->name, dev);
2599                 if (unlikely(ret < 0)) {
2600                         netdev_err(priv->dev,
2601                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2602                                    __func__, priv->wol_irq, ret);
2603                         goto wolirq_error;
2604                 }
2605         }
2606
2607         /* Request the LPI IRQ in case a separate line is used */
2608         if (priv->lpi_irq > 0) {
2609                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2610                                   dev->name, dev);
2611                 if (unlikely(ret < 0)) {
2612                         netdev_err(priv->dev,
2613                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2614                                    __func__, priv->lpi_irq, ret);
2615                         goto lpiirq_error;
2616                 }
2617         }
2618
2619         stmmac_enable_all_queues(priv);
2620         stmmac_start_all_queues(priv);
2621
2622         return 0;
2623
2624 lpiirq_error:
2625         if (priv->wol_irq != dev->irq)
2626                 free_irq(priv->wol_irq, dev);
2627 wolirq_error:
2628         free_irq(dev->irq, dev);
2629 irq_error:
2630         if (dev->phydev)
2631                 phy_stop(dev->phydev);
2632
2633         del_timer_sync(&priv->txtimer);
2634         stmmac_hw_teardown(dev);
2635 init_error:
2636         free_dma_desc_resources(priv);
2637 dma_desc_error:
2638         if (dev->phydev)
2639                 phy_disconnect(dev->phydev);
2640
2641         return ret;
2642 }
2643
2644 /**
2645  *  stmmac_release - close entry point of the driver
2646  *  @dev : device pointer.
2647  *  Description:
2648  *  This is the stop entry point of the driver.
2649  */
2650 static int stmmac_release(struct net_device *dev)
2651 {
2652         struct stmmac_priv *priv = netdev_priv(dev);
2653
2654         if (priv->eee_enabled)
2655                 del_timer_sync(&priv->eee_ctrl_timer);
2656
2657         /* Stop and disconnect the PHY */
2658         if (dev->phydev) {
2659                 phy_stop(dev->phydev);
2660                 phy_disconnect(dev->phydev);
2661         }
2662
2663         stmmac_stop_all_queues(priv);
2664
2665         stmmac_disable_all_queues(priv);
2666
2667         del_timer_sync(&priv->txtimer);
2668
2669         /* Free the IRQ lines */
2670         free_irq(dev->irq, dev);
2671         if (priv->wol_irq != dev->irq)
2672                 free_irq(priv->wol_irq, dev);
2673         if (priv->lpi_irq > 0)
2674                 free_irq(priv->lpi_irq, dev);
2675
2676         /* Stop TX/RX DMA and clear the descriptors */
2677         stmmac_stop_all_dma(priv);
2678
2679         /* Release and free the Rx/Tx resources */
2680         free_dma_desc_resources(priv);
2681
2682         /* Disable the MAC Rx/Tx */
2683         priv->hw->mac->set_mac(priv->ioaddr, false);
2684
2685         netif_carrier_off(dev);
2686
2687 #ifdef CONFIG_DEBUG_FS
2688         stmmac_exit_fs(dev);
2689 #endif
2690
2691         stmmac_release_ptp(priv);
2692
2693         return 0;
2694 }
2695
2696 /**
2697  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2698  *  @priv: driver private structure
2699  *  @des: buffer start address
2700  *  @total_len: total length to fill in descriptors
2701  *  @last_segment: condition for the last descriptor
2702  *  @queue: TX queue index
2703  *  Description:
2704  *  This function fills the current descriptor and requests new descriptors
2705  *  according to the buffer length to fill
2706  */
2707 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2708                                  int total_len, bool last_segment, u32 queue)
2709 {
2710         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2711         struct dma_desc *desc;
2712         u32 buff_size;
2713         int tmp_len;
2714
2715         tmp_len = total_len;
2716
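        /*
         * Consume the payload in TSO_MAX_BUFF_SIZE chunks, one descriptor per
         * chunk. Illustrative example: with TSO_MAX_BUFF_SIZE = 16383, a
         * 40000 byte payload takes three descriptors of 16383, 16383 and
         * 7234 bytes.
         */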
2717         while (tmp_len > 0) {
2718                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2719                 desc = tx_q->dma_tx + tx_q->cur_tx;
2720
2721                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2722                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2723                             TSO_MAX_BUFF_SIZE : tmp_len;
2724
2725                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2726                         0, 1,
2727                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2728                         0, 0);
2729
2730                 tmp_len -= TSO_MAX_BUFF_SIZE;
2731         }
2732 }
2733
2734 /**
2735  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2736  *  @skb : the socket buffer
2737  *  @dev : device pointer
2738  *  Description: this is the transmit function that is called on TSO frames
2739  *  (support available on GMAC4 and newer chips).
2740  *  The diagram below shows the ring programming in case of TSO frames:
2741  *
2742  *  First Descriptor
2743  *   --------
2744  *   | DES0 |---> buffer1 = L2/L3/L4 header
2745  *   | DES1 |---> TCP Payload (can continue on next descr...)
2746  *   | DES2 |---> buffer 1 and 2 len
2747  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2748  *   --------
2749  *      |
2750  *     ...
2751  *      |
2752  *   --------
2753  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2754  *   | DES1 | --|
2755  *   | DES2 | --> buffer 1 and 2 len
2756  *   | DES3 |
2757  *   --------
2758  *
2759  * mss is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
2760  */
2761 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2762 {
2763         struct dma_desc *desc, *first, *mss_desc = NULL;
2764         struct stmmac_priv *priv = netdev_priv(dev);
2765         int nfrags = skb_shinfo(skb)->nr_frags;
2766         u32 queue = skb_get_queue_mapping(skb);
2767         unsigned int first_entry, des;
2768         struct stmmac_tx_queue *tx_q;
2769         int tmp_pay_len = 0;
2770         u32 pay_len, mss;
2771         u8 proto_hdr_len;
2772         int i;
2773
2774         tx_q = &priv->tx_queue[queue];
2775
2776         /* Compute header lengths */
2777         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2778
2779         /* Descriptor availability based on the threshold should be safe enough */
2780         if (unlikely(stmmac_tx_avail(priv, queue) <
2781                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2782                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2783                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2784                                                                 queue));
2785                         /* This is a hard error, log it. */
2786                         netdev_err(priv->dev,
2787                                    "%s: Tx Ring full when queue awake\n",
2788                                    __func__);
2789                 }
2790                 return NETDEV_TX_BUSY;
2791         }
2792
2793         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2794
2795         mss = skb_shinfo(skb)->gso_size;
2796
2797         /* set new MSS value if needed */
2798         if (mss != priv->mss) {
2799                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2800                 priv->hw->desc->set_mss(mss_desc, mss);
2801                 priv->mss = mss;
2802                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2803         }
2804
2805         if (netif_msg_tx_queued(priv)) {
2806                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2807                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2808                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2809                         skb->data_len);
2810         }
2811
2812         first_entry = tx_q->cur_tx;
2813
2814         desc = tx_q->dma_tx + first_entry;
2815         first = desc;
2816
2817         /* first descriptor: fill Headers on Buf1 */
2818         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2819                              DMA_TO_DEVICE);
2820         if (dma_mapping_error(priv->device, des))
2821                 goto dma_map_err;
2822
2823         tx_q->tx_skbuff_dma[first_entry].buf = des;
2824         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2825         tx_q->tx_skbuff[first_entry] = skb;
2826
2827         first->des0 = cpu_to_le32(des);
2828
2829         /* Fill start of payload in buff2 of first descriptor */
2830         if (pay_len)
2831                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2832
2833         /* If needed take extra descriptors to fill the remaining payload */
2834         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2835
2836         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2837
2838         /* Prepare fragments */
2839         for (i = 0; i < nfrags; i++) {
2840                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2841
2842                 des = skb_frag_dma_map(priv->device, frag, 0,
2843                                        skb_frag_size(frag),
2844                                        DMA_TO_DEVICE);
2845                 if (dma_mapping_error(priv->device, des))
2846                         goto dma_map_err;
2847
2848                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2849                                      (i == nfrags - 1), queue);
2850
2851                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2852                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2853                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2854                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2855         }
2856
2857         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2858
2859         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2860
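        /* If a further frame with the maximum number of fragments might not
         * fit in the ring, stop the queue now; it is woken again from the Tx
         * completion path once enough descriptors have been released.
         */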
2861         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2862                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2863                           __func__);
2864                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2865         }
2866
2867         dev->stats.tx_bytes += skb->len;
2868         priv->xstats.tx_tso_frames++;
2869         priv->xstats.tx_tso_nfrags += nfrags;
2870
2871         /* Manage tx mitigation */
2872         priv->tx_count_frames += nfrags + 1;
2873         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2874                 mod_timer(&priv->txtimer,
2875                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2876         } else {
2877                 priv->tx_count_frames = 0;
2878                 priv->hw->desc->set_tx_ic(desc);
2879                 priv->xstats.tx_set_ic_bit++;
2880         }
2881
2882         if (!priv->hwts_tx_en)
2883                 skb_tx_timestamp(skb);
2884
2885         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2886                      priv->hwts_tx_en)) {
2887                 /* declare that device is doing timestamping */
2888                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2889                 priv->hw->desc->enable_tx_timestamp(first);
2890         }
2891
2892         /* Complete the first descriptor before granting the DMA */
2893         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2894                         proto_hdr_len,
2895                         pay_len,
2896                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2897                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2898
2899         /* If context desc is used to change MSS */
2900         if (mss_desc)
2901                 priv->hw->desc->set_tx_owner(mss_desc);
2902
2903         /* The own bit must be the last setting done when preparing the
2904          * descriptor, and a barrier is needed to make sure everything
2905          * is coherent before granting control to the DMA engine.
2906          */
2907         dma_wmb();
2908
2909         if (netif_msg_pktdata(priv)) {
2910                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2911                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2912                         tx_q->cur_tx, first, nfrags);
2913
2914                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2915                                              0);
2916
2917                 pr_info(">>> frame to be transmitted: ");
2918                 print_pkt(skb->data, skb_headlen(skb));
2919         }
2920
2921         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2922
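        /* Updating the queue tail pointer notifies the DMA that new
         * descriptors are available and (re)starts transmission on this queue.
         */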
2923         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2924                                        queue);
2925
2926         return NETDEV_TX_OK;
2927
2928 dma_map_err:
2929         dev_err(priv->device, "Tx dma map failed\n");
2930         dev_kfree_skb(skb);
2931         priv->dev->stats.tx_dropped++;
2932         return NETDEV_TX_OK;
2933 }
2934
2935 /**
2936  *  stmmac_xmit - Tx entry point of the driver
2937  *  @skb : the socket buffer
2938  *  @dev : device pointer
2939  *  Description : this is the tx entry point of the driver.
2940  *  It programs the chain or the ring and supports oversized frames
2941  *  and SG feature.
2942  */
2943 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2944 {
2945         struct stmmac_priv *priv = netdev_priv(dev);
2946         unsigned int nopaged_len = skb_headlen(skb);
2947         int i, csum_insertion = 0, is_jumbo = 0;
2948         u32 queue = skb_get_queue_mapping(skb);
2949         int nfrags = skb_shinfo(skb)->nr_frags;
2950         unsigned int entry, first_entry;
2951         struct dma_desc *desc, *first;
2952         struct stmmac_tx_queue *tx_q;
2953         unsigned int enh_desc;
2954         unsigned int des;
2955
2956         tx_q = &priv->tx_queue[queue];
2957
2958         /* Manage oversized TCP frames for GMAC4 device */
2959         if (skb_is_gso(skb) && priv->tso) {
2960                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2961                         return stmmac_tso_xmit(skb, dev);
2962         }
2963
2964         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2965                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2966                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2967                                                                 queue));
2968                         /* This is a hard error, log it. */
2969                         netdev_err(priv->dev,
2970                                    "%s: Tx Ring full when queue awake\n",
2971                                    __func__);
2972                 }
2973                 return NETDEV_TX_BUSY;
2974         }
2975
2976         if (priv->tx_path_in_lpi_mode)
2977                 stmmac_disable_eee_mode(priv);
2978
2979         entry = tx_q->cur_tx;
2980         first_entry = entry;
2981
2982         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2983
2984         if (likely(priv->extend_desc))
2985                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2986         else
2987                 desc = tx_q->dma_tx + entry;
2988
2989         first = desc;
2990
2991         tx_q->tx_skbuff[first_entry] = skb;
2992
2993         enh_desc = priv->plat->enh_desc;
2994         /* To program the descriptors according to the size of the frame */
2995         if (enh_desc)
2996                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2997
2998         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2999                                          DWMAC_CORE_4_00)) {
3000                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3001                 if (unlikely(entry < 0))
3002                         goto dma_map_err;
3003         }
3004
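        /* Map each paged fragment and fill one descriptor per fragment; these
         * descriptors get their OWN bit set right away, while the first
         * descriptor is completed last (see below) so the DMA cannot start
         * on a half-built chain.
         */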
3005         for (i = 0; i < nfrags; i++) {
3006                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3007                 int len = skb_frag_size(frag);
3008                 bool last_segment = (i == (nfrags - 1));
3009
3010                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3011
3012                 if (likely(priv->extend_desc))
3013                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3014                 else
3015                         desc = tx_q->dma_tx + entry;
3016
3017                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3018                                        DMA_TO_DEVICE);
3019                 if (dma_mapping_error(priv->device, des))
3020                         goto dma_map_err; /* should reuse desc w/o issues */
3021
3022                 tx_q->tx_skbuff[entry] = NULL;
3023
3024                 tx_q->tx_skbuff_dma[entry].buf = des;
3025                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3026                         desc->des0 = cpu_to_le32(des);
3027                 else
3028                         desc->des2 = cpu_to_le32(des);
3029
3030                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3031                 tx_q->tx_skbuff_dma[entry].len = len;
3032                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3033
3034                 /* Prepare the descriptor and set the own bit too */
3035                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3036                                                 priv->mode, 1, last_segment,
3037                                                 skb->len);
3038         }
3039
3040         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3041
3042         tx_q->cur_tx = entry;
3043
3044         if (netif_msg_pktdata(priv)) {
3045                 void *tx_head;
3046
3047                 netdev_dbg(priv->dev,
3048                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3049                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3050                            entry, first, nfrags);
3051
3052                 if (priv->extend_desc)
3053                         tx_head = (void *)tx_q->dma_etx;
3054                 else
3055                         tx_head = (void *)tx_q->dma_tx;
3056
3057                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3058
3059                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3060                 print_pkt(skb->data, skb->len);
3061         }
3062
3063         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3064                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3065                           __func__);
3066                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3067         }
3068
3069         dev->stats.tx_bytes += skb->len;
3070
3071         /* According to the coalesce parameter, the IC bit for the latest
3072          * segment is cleared and the timer is re-armed to clean the tx status.
3073          * This approach takes the fragments into account: desc is the first
3074          * element when there is no SG.
3075          */
3076         priv->tx_count_frames += nfrags + 1;
3077         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3078                 mod_timer(&priv->txtimer,
3079                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3080         } else {
3081                 priv->tx_count_frames = 0;
3082                 priv->hw->desc->set_tx_ic(desc);
3083                 priv->xstats.tx_set_ic_bit++;
3084         }
3085
3086         if (!priv->hwts_tx_en)
3087                 skb_tx_timestamp(skb);
3088
3089         /* Ready to fill the first descriptor and set the OWN bit w/o any
3090          * problems because all the descriptors are actually ready to be
3091          * passed to the DMA engine.
3092          */
3093         if (likely(!is_jumbo)) {
3094                 bool last_segment = (nfrags == 0);
3095
3096                 des = dma_map_single(priv->device, skb->data,
3097                                      nopaged_len, DMA_TO_DEVICE);
3098                 if (dma_mapping_error(priv->device, des))
3099                         goto dma_map_err;
3100
3101                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3102                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3103                         first->des0 = cpu_to_le32(des);
3104                 else
3105                         first->des2 = cpu_to_le32(des);
3106
3107                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3108                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3109
3110                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3111                              priv->hwts_tx_en)) {
3112                         /* declare that device is doing timestamping */
3113                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3114                         priv->hw->desc->enable_tx_timestamp(first);
3115                 }
3116
3117                 /* Prepare the first descriptor setting the OWN bit too */
3118                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3119                                                 csum_insertion, priv->mode, 1,
3120                                                 last_segment, skb->len);
3121
3122                 /* The own bit must be the last setting done when preparing the
3123                  * descriptor, and a barrier is needed to make sure everything
3124                  * is coherent before granting control to the DMA engine.
3125                  */
3126                 dma_wmb();
3127         }
3128
3129         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3130
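        /* Older cores are kicked via the transmit poll demand register,
         * whereas GMAC4 and newer use the per-queue tail pointer to notify
         * the DMA of new descriptors.
         */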
3131         if (priv->synopsys_id < DWMAC_CORE_4_00)
3132                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3133         else
3134                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3135                                                queue);
3136
3137         return NETDEV_TX_OK;
3138
3139 dma_map_err:
3140         netdev_err(priv->dev, "Tx DMA map failed\n");
3141         dev_kfree_skb(skb);
3142         priv->dev->stats.tx_dropped++;
3143         return NETDEV_TX_OK;
3144 }
3145
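/* stmmac_rx_vlan - if VLAN RX offload (CTAG) is enabled and the frame carries
 * an 802.1Q header, strip the tag from the packet data and record the VLAN ID
 * via the HW-accel tag helper.
 */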
3146 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3147 {
3148         struct ethhdr *ehdr;
3149         u16 vlanid;
3150
3151         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3152             NETIF_F_HW_VLAN_CTAG_RX &&
3153             !__vlan_get_tag(skb, &vlanid)) {
3154                 /* pop the vlan tag */
3155                 ehdr = (struct ethhdr *)skb->data;
3156                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3157                 skb_pull(skb, VLAN_HLEN);
3158                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3159         }
3160 }
3161
3162
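/* Returns 1 once the zero-copy counter has reached STMMAC_RX_THRESH (for
 * instance after an skb allocation failure in the refill path), which makes
 * stmmac_rx() fall back to copying frames instead of handing the preallocated
 * buffers up the stack.
 */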
3163 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3164 {
3165         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3166                 return 0;
3167
3168         return 1;
3169 }
3170
3171 /**
3172  * stmmac_rx_refill - refill the used preallocated skb buffers
3173  * @priv: driver private structure
3174  * @queue: RX queue index
3175  * Description : this is to reallocate the skb for the reception process
3176  * that is based on zero-copy.
3177  */
3178 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3179 {
3180         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3181         int dirty = stmmac_rx_dirty(priv, queue);
3182         unsigned int entry = rx_q->dirty_rx;
3183
3184         int bfsize = priv->dma_buf_sz;
3185
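        /* For each entry consumed by the RX path, allocate and DMA-map a
         * fresh skb, program the descriptor buffer address, and finally hand
         * the descriptor back to the DMA by setting its own bit.
         */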
3186         while (dirty-- > 0) {
3187                 struct dma_desc *p;
3188
3189                 if (priv->extend_desc)
3190                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3191                 else
3192                         p = rx_q->dma_rx + entry;
3193
3194                 if (likely(!rx_q->rx_skbuff[entry])) {
3195                         struct sk_buff *skb;
3196
3197                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3198                         if (unlikely(!skb)) {
3199                                 /* so for a while no zero-copy! */
3200                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3201                                 if (unlikely(net_ratelimit()))
3202                                         dev_err(priv->device,
3203                                                 "fail to alloc skb entry %d\n",
3204                                                 entry);
3205                                 break;
3206                         }
3207
3208                         rx_q->rx_skbuff[entry] = skb;
3209                         rx_q->rx_skbuff_dma[entry] =
3210                             dma_map_single(priv->device, skb->data, bfsize,
3211                                            DMA_FROM_DEVICE);
3212                         if (dma_mapping_error(priv->device,
3213                                               rx_q->rx_skbuff_dma[entry])) {
3214                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3215                                 dev_kfree_skb(skb);
3216                                 break;
3217                         }
3218
3219                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3220                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3221                                 p->des1 = 0;
3222                         } else {
3223                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3224                         }
3225                         if (priv->hw->mode->refill_desc3)
3226                                 priv->hw->mode->refill_desc3(rx_q, p);
3227
3228                         if (rx_q->rx_zeroc_thresh > 0)
3229                                 rx_q->rx_zeroc_thresh--;
3230
3231                         netif_dbg(priv, rx_status, priv->dev,
3232                                   "refill entry #%d\n", entry);
3233                 }
3234                 dma_wmb();
3235
3236                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3237                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3238                 else
3239                         priv->hw->desc->set_rx_owner(p);
3240
3241                 dma_wmb();
3242
3243                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3244         }
3245         rx_q->dirty_rx = entry;
3246 }
3247
3248 /**
3249  * stmmac_rx - manage the receive process
3250  * @priv: driver private structure
3251  * @limit: napi budget
3252  * @queue: RX queue index.
3253  * Description : this is the function called by the napi poll method.
3254  * It gets all the frames inside the ring.
3255  */
3256 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3257 {
3258         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3259         unsigned int entry = rx_q->cur_rx;
3260         int coe = priv->hw->rx_csum;
3261         unsigned int next_entry;
3262         unsigned int count = 0;
3263
3264         if (netif_msg_rx_status(priv)) {
3265                 void *rx_head;
3266
3267                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3268                 if (priv->extend_desc)
3269                         rx_head = (void *)rx_q->dma_erx;
3270                 else
3271                         rx_head = (void *)rx_q->dma_rx;
3272
3273                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3274         }
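        /* Process up to "limit" descriptors released by the DMA (own bit
         * cleared), passing completed frames to the stack through
         * napi_gro_receive().
         */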
3275         while (count < limit) {
3276                 int status;
3277                 struct dma_desc *p;
3278                 struct dma_desc *np;
3279
3280                 if (priv->extend_desc)
3281                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3282                 else
3283                         p = rx_q->dma_rx + entry;
3284
3285                 /* read the status of the incoming frame */
3286                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3287                                                    &priv->xstats, p);
3288                 /* stop if the descriptor is still owned by the DMA, otherwise go ahead */
3289                 if (unlikely(status & dma_own))
3290                         break;
3291
3292                 count++;
3293
3294                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3295                 next_entry = rx_q->cur_rx;
3296
3297                 if (priv->extend_desc)
3298                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3299                 else
3300                         np = rx_q->dma_rx + next_entry;
3301
3302                 prefetch(np);
3303
3304                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3305                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3306                                                            &priv->xstats,
3307                                                            rx_q->dma_erx +
3308                                                            entry);
3309                 if (unlikely(status == discard_frame)) {
3310                         priv->dev->stats.rx_errors++;
3311                         if (priv->hwts_rx_en && !priv->extend_desc) {
3312                                 /* DESC2 & DESC3 will be overwritten by device
3313                                  * with timestamp value, hence reinitialize
3314                                  * them in stmmac_rx_refill() function so that
3315                                  * device can reuse it.
3316                                  */
3317                                 rx_q->rx_skbuff[entry] = NULL;
3318                                 dma_unmap_single(priv->device,
3319                                                  rx_q->rx_skbuff_dma[entry],
3320                                                  priv->dma_buf_sz,
3321                                                  DMA_FROM_DEVICE);
3322                         }
3323                 } else {
3324                         struct sk_buff *skb;
3325                         int frame_len;
3326                         unsigned int des;
3327
3328                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3329                                 des = le32_to_cpu(p->des0);
3330                         else
3331                                 des = le32_to_cpu(p->des2);
3332
3333                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3334
3335                         /*  If frame length is greater than skb buffer size
3336                          *  (preallocated during init) then the packet is
3337                          *  ignored
3338                          */
3339                         if (frame_len > priv->dma_buf_sz) {
3340                                 netdev_err(priv->dev,
3341                                            "len %d larger than size (%d)\n",
3342                                            frame_len, priv->dma_buf_sz);
3343                                 priv->dev->stats.rx_length_errors++;
3344                                 break;
3345                         }
3346
3347                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3348                          * Type frames (LLC/LLC-SNAP)
3349                          */
3350                         if (unlikely(status != llc_snap))
3351                                 frame_len -= ETH_FCS_LEN;
3352
3353                         if (netif_msg_rx_status(priv)) {
3354                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3355                                            p, entry, des);
3356                                 if (frame_len > ETH_FRAME_LEN)
3357                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3358                                                    frame_len, status);
3359                         }
3360
3361                         /* The zero-copy is always used for all the sizes
3362                          * in case of GMAC4 because it needs
3363                          * to refill the used descriptors, always.
3364                          */
3365                         if (unlikely(!priv->plat->has_gmac4 &&
3366                                      ((frame_len < priv->rx_copybreak) ||
3367                                      stmmac_rx_threshold_count(rx_q)))) {
3368                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3369                                                                 frame_len);
3370                                 if (unlikely(!skb)) {
3371                                         if (net_ratelimit())
3372                                                 dev_warn(priv->device,
3373                                                          "packet dropped\n");
3374                                         priv->dev->stats.rx_dropped++;
3375                                         break;
3376                                 }
3377
3378                                 dma_sync_single_for_cpu(priv->device,
3379                                                         rx_q->rx_skbuff_dma
3380                                                         [entry], frame_len,
3381                                                         DMA_FROM_DEVICE);
3382                                 skb_copy_to_linear_data(skb,
3383                                                         rx_q->
3384                                                         rx_skbuff[entry]->data,
3385                                                         frame_len);
3386
3387                                 skb_put(skb, frame_len);
3388                                 dma_sync_single_for_device(priv->device,
3389                                                            rx_q->rx_skbuff_dma
3390                                                            [entry], frame_len,
3391                                                            DMA_FROM_DEVICE);
3392                         } else {
3393                                 skb = rx_q->rx_skbuff[entry];
3394                                 if (unlikely(!skb)) {
3395                                         netdev_err(priv->dev,
3396                                                    "%s: Inconsistent Rx chain\n",
3397                                                    priv->dev->name);
3398                                         priv->dev->stats.rx_dropped++;
3399                                         break;
3400                                 }
3401                                 prefetch(skb->data - NET_IP_ALIGN);
3402                                 rx_q->rx_skbuff[entry] = NULL;
3403                                 rx_q->rx_zeroc_thresh++;
3404
3405                                 skb_put(skb, frame_len);
3406                                 dma_unmap_single(priv->device,
3407                                                  rx_q->rx_skbuff_dma[entry],
3408                                                  priv->dma_buf_sz,
3409                                                  DMA_FROM_DEVICE);
3410                         }
3411
3412                         if (netif_msg_pktdata(priv)) {
3413                 netdev_dbg(priv->dev, "frame received (%d bytes)",
3414                                            frame_len);
3415                                 print_pkt(skb->data, frame_len);
3416                         }
3417
3418                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3419
3420                         stmmac_rx_vlan(priv->dev, skb);
3421
3422                         skb->protocol = eth_type_trans(skb, priv->dev);
3423
3424                         if (unlikely(!coe))
3425                                 skb_checksum_none_assert(skb);
3426                         else
3427                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3428
3429                         napi_gro_receive(&rx_q->napi, skb);
3430
3431                         priv->dev->stats.rx_packets++;
3432                         priv->dev->stats.rx_bytes += frame_len;
3433                 }
3434                 entry = next_entry;
3435         }
3436
3437         stmmac_rx_refill(priv, queue);
3438
3439         priv->xstats.rx_pkt_n += count;
3440
3441         return count;
3442 }
3443
3444 /**
3445  *  stmmac_poll - stmmac poll method (NAPI)
3446  *  @napi : pointer to the napi structure.
3447  *  @budget : maximum number of packets that the current CPU can receive from
3448  *            all interfaces.
3449  *  Description :
3450  *  To look at the incoming frames and clear the tx resources.
3451  */
3452 static int stmmac_poll(struct napi_struct *napi, int budget)
3453 {
3454         struct stmmac_rx_queue *rx_q =
3455                 container_of(napi, struct stmmac_rx_queue, napi);
3456         struct stmmac_priv *priv = rx_q->priv_data;
3457         u32 tx_count = priv->plat->tx_queues_to_use;
3458         u32 chan = rx_q->queue_index;
3459         int work_done = 0;
3460         u32 queue;
3461
3462         priv->xstats.napi_poll++;
3463
3464         /* check all the queues */
3465         for (queue = 0; queue < tx_count; queue++)
3466                 stmmac_tx_clean(priv, queue);
3467
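        /* RX is processed only for the queue owning this NAPI instance; if
         * the budget was not exhausted, NAPI is completed and the DMA
         * interrupts for this channel are re-enabled.
         */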
3468         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3469         if (work_done < budget) {
3470                 napi_complete_done(napi, work_done);
3471                 stmmac_enable_dma_irq(priv, chan);
3472         }
3473         return work_done;
3474 }
3475
3476 /**
3477  *  stmmac_tx_timeout
3478  *  @dev : Pointer to net device structure
3479  *  Description: this function is called when a packet transmission fails to
3480  *   complete within a reasonable time. The driver will mark the error in the
3481  *   netdev structure and arrange for the device to be reset to a sane state
3482  *   in order to transmit a new packet.
3483  */
3484 static void stmmac_tx_timeout(struct net_device *dev)
3485 {
3486         struct stmmac_priv *priv = netdev_priv(dev);
3487         u32 tx_count = priv->plat->tx_queues_to_use;
3488         u32 chan;
3489
3490         /* Clear Tx resources and restart transmitting again */
3491         for (chan = 0; chan < tx_count; chan++)
3492                 stmmac_tx_err(priv, chan);
3493 }
3494
3495 /**
3496  *  stmmac_set_rx_mode - entry point for multicast addressing
3497  *  @dev : pointer to the device structure
3498  *  Description:
3499  *  This function is a driver entry point which gets called by the kernel
3500  *  whenever multicast addresses must be enabled/disabled.
3501  *  Return value:
3502  *  void.
3503  */
3504 static void stmmac_set_rx_mode(struct net_device *dev)
3505 {
3506         struct stmmac_priv *priv = netdev_priv(dev);
3507
3508         priv->hw->mac->set_filter(priv->hw, dev);
3509 }
3510
3511 /**
3512  *  stmmac_change_mtu - entry point to change MTU size for the device.
3513  *  @dev : device pointer.
3514  *  @new_mtu : the new MTU size for the device.
3515  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3516  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3517  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3518  *  Return value:
3519  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3520  *  file on failure.
3521  */
3522 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3523 {
3524         struct stmmac_priv *priv = netdev_priv(dev);
3525
3526         if (netif_running(dev)) {
3527                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3528                 return -EBUSY;
3529         }
3530
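        /* Only record the new MTU here; the DMA buffer size is presumably
         * recomputed from it the next time the interface is opened.
         */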
3531         dev->mtu = new_mtu;
3532
3533         netdev_update_features(dev);
3534
3535         return 0;
3536 }
3537
3538 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3539                                              netdev_features_t features)
3540 {
3541         struct stmmac_priv *priv = netdev_priv(dev);
3542
3543         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3544                 features &= ~NETIF_F_RXCSUM;
3545
3546         if (!priv->plat->tx_coe)
3547                 features &= ~NETIF_F_CSUM_MASK;
3548
3549         /* Some GMAC devices have a bugged Jumbo frame support that
3550          * needs to have the Tx COE disabled for oversized frames
3551          * (due to limited buffer sizes). In this case we disable the
3552          * TX csum insertion in the TDES and do not use Store-and-Forward (SF).
3553          */
3554         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3555                 features &= ~NETIF_F_CSUM_MASK;
3556
3557         /* Enable or disable TSO as requested via ethtool */
3558         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3559                 if (features & NETIF_F_TSO)
3560                         priv->tso = true;
3561                 else
3562                         priv->tso = false;
3563         }
3564
3565         return features;
3566 }
3567
3568 static int stmmac_set_features(struct net_device *netdev,
3569                                netdev_features_t features)
3570 {
3571         struct stmmac_priv *priv = netdev_priv(netdev);
3572
3573         /* Keep the COE Type if RX checksum offload is supported */
3574         if (features & NETIF_F_RXCSUM)
3575                 priv->hw->rx_csum = priv->plat->rx_coe;
3576         else
3577                 priv->hw->rx_csum = 0;
3578         /* No check needed because rx_coe has been validated before and will be
3579          * fixed up if there is an issue.
3580          */
3581         priv->hw->mac->rx_ipc(priv->hw);
3582
3583         return 0;
3584 }
3585
3586 /**
3587  *  stmmac_interrupt - main ISR
3588  *  @irq: interrupt number.
3589  *  @dev_id: to pass the net device pointer.
3590  *  Description: this is the main driver interrupt service routine.
3591  *  It can call:
3592  *  o DMA service routine (to manage incoming frame reception and transmission
3593  *    status)
3594  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3595  *    interrupts.
3596  */
3597 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3598 {
3599         struct net_device *dev = (struct net_device *)dev_id;
3600         struct stmmac_priv *priv = netdev_priv(dev);
3601         u32 rx_cnt = priv->plat->rx_queues_to_use;
3602         u32 tx_cnt = priv->plat->tx_queues_to_use;
3603         u32 queues_count;
3604         u32 queue;
3605
3606         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3607
3608         if (priv->irq_wake)
3609                 pm_wakeup_event(priv->device, 0);
3610
3611         if (unlikely(!dev)) {
3612                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3613                 return IRQ_NONE;
3614         }
3615
3616         /* Handle the GMAC core's own interrupts */
3617         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3618                 int status = priv->hw->mac->host_irq_status(priv->hw,
3619                                                             &priv->xstats);
3620
3621                 if (unlikely(status)) {
3622                         /* For LPI we need to save the tx status */
3623                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3624                                 priv->tx_path_in_lpi_mode = true;
3625                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3626                                 priv->tx_path_in_lpi_mode = false;
3627                 }
3628
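                /* For GMAC4 and newer, also collect the per-queue MTL
                 * interrupt status; on an RX overflow condition the RX tail
                 * pointer is rewritten to restart the DMA for that queue.
                 */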
3629                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3630                         for (queue = 0; queue < queues_count; queue++) {
3631                                 struct stmmac_rx_queue *rx_q =
3632                                 &priv->rx_queue[queue];
3633
3634                                 status |=
3635                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3636                                                                    queue);
3637
3638                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3639                                     priv->hw->dma->set_rx_tail_ptr)
3640                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3641                                                                 rx_q->rx_tail_addr,
3642                                                                 queue);
3643                         }
3644                 }
3645
3646                 /* PCS link status */
3647                 if (priv->hw->pcs) {
3648                         if (priv->xstats.pcs_link)
3649                                 netif_carrier_on(dev);
3650                         else
3651                                 netif_carrier_off(dev);
3652                 }
3653         }
3654
3655         /* To handle DMA interrupts */
3656         stmmac_dma_interrupt(priv);
3657
3658         return IRQ_HANDLED;
3659 }
3660
3661 #ifdef CONFIG_NET_POLL_CONTROLLER
3662 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3663  * to allow network I/O with interrupts disabled.
3664  */
3665 static void stmmac_poll_controller(struct net_device *dev)
3666 {
3667         disable_irq(dev->irq);
3668         stmmac_interrupt(dev->irq, dev);
3669         enable_irq(dev->irq);
3670 }
3671 #endif
3672
3673 /**
3674  *  stmmac_ioctl - Entry point for the Ioctl
3675  *  @dev: Device pointer.
3676  *  @rq: An IOCTL-specific structure that can contain a pointer to
3677  *  a proprietary structure used to pass information to the driver.
3678  *  @cmd: IOCTL command
3679  *  Description:
3680  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3681  */
3682 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3683 {
3684         int ret = -EOPNOTSUPP;
3685
3686         if (!netif_running(dev))
3687                 return -EINVAL;
3688
3689         switch (cmd) {
3690         case SIOCGMIIPHY:
3691         case SIOCGMIIREG:
3692         case SIOCSMIIREG:
3693                 if (!dev->phydev)
3694                         return -EINVAL;
3695                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3696                 break;
3697         case SIOCSHWTSTAMP:
3698                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3699                 break;
3700         default:
3701                 break;
3702         }
3703
3704         return ret;
3705 }
3706
3707 #ifdef CONFIG_DEBUG_FS
3708 static struct dentry *stmmac_fs_dir;
3709
3710 static void sysfs_display_ring(void *head, int size, int extend_desc,
3711                                struct seq_file *seq)
3712 {
3713         int i;
3714         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3715         struct dma_desc *p = (struct dma_desc *)head;
3716
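        /* Dump the four descriptor words of every ring entry, using either
         * the extended or the basic descriptor layout.
         */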
3717         for (i = 0; i < size; i++) {
3718                 if (extend_desc) {
3719                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3720                                    i, (unsigned int)virt_to_phys(ep),
3721                                    le32_to_cpu(ep->basic.des0),
3722                                    le32_to_cpu(ep->basic.des1),
3723                                    le32_to_cpu(ep->basic.des2),
3724                                    le32_to_cpu(ep->basic.des3));
3725                         ep++;
3726                 } else {
3727                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3728                                    i, (unsigned int)virt_to_phys(p),
3729                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3730                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3731                         p++;
3732                 }
3733                 seq_printf(seq, "\n");
3734         }
3735 }
3736
3737 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3738 {
3739         struct net_device *dev = seq->private;
3740         struct stmmac_priv *priv = netdev_priv(dev);
3741         u32 rx_count = priv->plat->rx_queues_to_use;
3742         u32 tx_count = priv->plat->tx_queues_to_use;
3743         u32 queue;
3744
3745         for (queue = 0; queue < rx_count; queue++) {
3746                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3747
3748                 seq_printf(seq, "RX Queue %d:\n", queue);
3749
3750                 if (priv->extend_desc) {
3751                         seq_printf(seq, "Extended descriptor ring:\n");
3752                         sysfs_display_ring((void *)rx_q->dma_erx,
3753                                            DMA_RX_SIZE, 1, seq);
3754                 } else {
3755                         seq_printf(seq, "Descriptor ring:\n");
3756                         sysfs_display_ring((void *)rx_q->dma_rx,
3757                                            DMA_RX_SIZE, 0, seq);
3758                 }
3759         }
3760
3761         for (queue = 0; queue < tx_count; queue++) {
3762                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3763
3764                 seq_printf(seq, "TX Queue %d:\n", queue);
3765
3766                 if (priv->extend_desc) {
3767                         seq_printf(seq, "Extended descriptor ring:\n");
3768                         sysfs_display_ring((void *)tx_q->dma_etx,
3769                                            DMA_TX_SIZE, 1, seq);
3770                 } else {
3771                         seq_printf(seq, "Descriptor ring:\n");
3772                         sysfs_display_ring((void *)tx_q->dma_tx,
3773                                            DMA_TX_SIZE, 0, seq);
3774                 }
3775         }
3776
3777         return 0;
3778 }
3779
3780 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3781 {
3782         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3783 }
3784
3785 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3786
3787 static const struct file_operations stmmac_rings_status_fops = {
3788         .owner = THIS_MODULE,
3789         .open = stmmac_sysfs_ring_open,
3790         .read = seq_read,
3791         .llseek = seq_lseek,
3792         .release = single_release,
3793 };
3794
3795 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3796 {
3797         struct net_device *dev = seq->private;
3798         struct stmmac_priv *priv = netdev_priv(dev);
3799
3800         if (!priv->hw_cap_support) {
3801                 seq_printf(seq, "DMA HW features not supported\n");
3802                 return 0;
3803         }
3804
3805         seq_printf(seq, "==============================\n");
3806         seq_printf(seq, "\tDMA HW features\n");
3807         seq_printf(seq, "==============================\n");
3808
3809         seq_printf(seq, "\t10/100 Mbps: %s\n",
3810                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3811         seq_printf(seq, "\t1000 Mbps: %s\n",
3812                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3813         seq_printf(seq, "\tHalf duplex: %s\n",
3814                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3815         seq_printf(seq, "\tHash Filter: %s\n",
3816                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3817         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3818                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3819         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3820                    (priv->dma_cap.pcs) ? "Y" : "N");
3821         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3822                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3823         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3824                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3825         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3826                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3827         seq_printf(seq, "\tRMON module: %s\n",
3828                    (priv->dma_cap.rmon) ? "Y" : "N");
3829         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3830                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3831         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3832                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3833         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3834                    (priv->dma_cap.eee) ? "Y" : "N");
3835         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3836         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3837                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3838         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3839                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3840                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3841         } else {
3842                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3843                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3844                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3845                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3846         }
3847         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3848                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3849         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3850                    priv->dma_cap.number_rx_channel);
3851         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3852                    priv->dma_cap.number_tx_channel);
3853         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3854                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3855
3856         return 0;
3857 }
3858
3859 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3860 {
3861         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3862 }
3863
3864 static const struct file_operations stmmac_dma_cap_fops = {
3865         .owner = THIS_MODULE,
3866         .open = stmmac_sysfs_dma_cap_open,
3867         .read = seq_read,
3868         .llseek = seq_lseek,
3869         .release = single_release,
3870 };
3871
3872 static int stmmac_init_fs(struct net_device *dev)
3873 {
3874         struct stmmac_priv *priv = netdev_priv(dev);
3875
3876         /* Create per netdev entries */
3877         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3878
3879         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3880                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3881
3882                 return -ENOMEM;
3883         }
3884
3885         /* Entry to report DMA RX/TX rings */
3886         priv->dbgfs_rings_status =
3887                 debugfs_create_file("descriptors_status", S_IRUGO,
3888                                     priv->dbgfs_dir, dev,
3889                                     &stmmac_rings_status_fops);
3890
3891         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3892                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3893                 debugfs_remove_recursive(priv->dbgfs_dir);
3894
3895                 return -ENOMEM;
3896         }
3897
3898         /* Entry to report the DMA HW features */
3899         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3900                                             priv->dbgfs_dir,
3901                                             dev, &stmmac_dma_cap_fops);
3902
3903         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3904                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3905                 debugfs_remove_recursive(priv->dbgfs_dir);
3906
3907                 return -ENOMEM;
3908         }
3909
3910         return 0;
3911 }
3912
3913 static void stmmac_exit_fs(struct net_device *dev)
3914 {
3915         struct stmmac_priv *priv = netdev_priv(dev);
3916
3917         debugfs_remove_recursive(priv->dbgfs_dir);
3918 }
3919 #endif /* CONFIG_DEBUG_FS */
3920
3921 static const struct net_device_ops stmmac_netdev_ops = {
3922         .ndo_open = stmmac_open,
3923         .ndo_start_xmit = stmmac_xmit,
3924         .ndo_stop = stmmac_release,
3925         .ndo_change_mtu = stmmac_change_mtu,
3926         .ndo_fix_features = stmmac_fix_features,
3927         .ndo_set_features = stmmac_set_features,
3928         .ndo_set_rx_mode = stmmac_set_rx_mode,
3929         .ndo_tx_timeout = stmmac_tx_timeout,
3930         .ndo_do_ioctl = stmmac_ioctl,
3931 #ifdef CONFIG_NET_POLL_CONTROLLER
3932         .ndo_poll_controller = stmmac_poll_controller,
3933 #endif
3934         .ndo_set_mac_address = eth_mac_addr,
3935 };
3936
3937 /**
3938  *  stmmac_hw_init - Init the MAC device
3939  *  @priv: driver private structure
3940  *  Description: this function is to configure the MAC device according to
3941  *  some platform parameters or the HW capability register. It prepares the
3942  *  driver to use either ring or chain modes and to setup either enhanced or
3943  *  normal descriptors.
3944  */
3945 static int stmmac_hw_init(struct stmmac_priv *priv)
3946 {
3947         struct mac_device_info *mac;
3948
3949         /* Identify the MAC HW device */
3950         if (priv->plat->has_gmac) {
3951                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3952                 mac = dwmac1000_setup(priv->ioaddr,
3953                                       priv->plat->multicast_filter_bins,
3954                                       priv->plat->unicast_filter_entries,
3955                                       &priv->synopsys_id);
3956         } else if (priv->plat->has_gmac4) {
3957                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3958                 mac = dwmac4_setup(priv->ioaddr,
3959                                    priv->plat->multicast_filter_bins,
3960                                    priv->plat->unicast_filter_entries,
3961                                    &priv->synopsys_id);
3962         } else {
3963                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3964         }
3965         if (!mac)
3966                 return -ENOMEM;
3967
3968         priv->hw = mac;
3969
3970         /* To use the chained or ring mode */
3971         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3972                 priv->hw->mode = &dwmac4_ring_mode_ops;
3973         } else {
3974                 if (chain_mode) {
3975                         priv->hw->mode = &chain_mode_ops;
3976                         dev_info(priv->device, "Chain mode enabled\n");
3977                         priv->mode = STMMAC_CHAIN_MODE;
3978                 } else {
3979                         priv->hw->mode = &ring_mode_ops;
3980                         dev_info(priv->device, "Ring mode enabled\n");
3981                         priv->mode = STMMAC_RING_MODE;
3982                 }
3983         }
3984
3985         /* Get the HW capability register (GMAC cores newer than 3.50a) */
3986         priv->hw_cap_support = stmmac_get_hw_features(priv);
3987         if (priv->hw_cap_support) {
3988                 dev_info(priv->device, "DMA HW capability register supported\n");
3989
3990                 /* We can override some gmac/dma configuration fields
3991                  * (e.g. enh_desc, tx_coe) that are passed through the
3992                  * platform data with the values from the HW capability
3993                  * register (if supported).
3994                  */
3995                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3996                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3997                 priv->hw->pmt = priv->plat->pmt;
3998
3999                 /* TXCOE doesn't work in thresh DMA mode */
4000                 if (priv->plat->force_thresh_dma_mode)
4001                         priv->plat->tx_coe = 0;
4002                 else
4003                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4004
4005                 /* In case of GMAC4 rx_coe is from HW cap register. */
4006                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4007
4008                 if (priv->dma_cap.rx_coe_type2)
4009                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4010                 else if (priv->dma_cap.rx_coe_type1)
4011                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4012
4013         } else {
4014                 dev_info(priv->device, "No HW DMA feature register supported\n");
4015         }
4016
4017         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4018         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4019                 priv->hw->desc = &dwmac4_desc_ops;
4020         else
4021                 stmmac_selec_desc_mode(priv);
4022
4023         if (priv->plat->rx_coe) {
4024                 priv->hw->rx_csum = priv->plat->rx_coe;
4025                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4026                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4027                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4028         }
4029         if (priv->plat->tx_coe)
4030                 dev_info(priv->device, "TX Checksum insertion supported\n");
4031
4032         if (priv->plat->pmt) {
4033                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4034                 device_set_wakeup_capable(priv->device, 1);
4035         }
4036
4037         if (priv->dma_cap.tsoen)
4038                 dev_info(priv->device, "TSO supported\n");
4039
4040         return 0;
4041 }
4042
4043 /**
4044  * stmmac_dvr_probe
4045  * @device: device pointer
4046  * @plat_dat: platform data pointer
4047  * @res: stmmac resource pointer
4048  * Description: this is the main probe function used to
4049  * call alloc_etherdev and allocate the private structure.
4050  * Return:
4051  * returns 0 on success, otherwise errno.
4052  */
4053 int stmmac_dvr_probe(struct device *device,
4054                      struct plat_stmmacenet_data *plat_dat,
4055                      struct stmmac_resources *res)
4056 {
4057         struct net_device *ndev = NULL;
4058         struct stmmac_priv *priv;
4059         int ret = 0;
4060         u32 queue;
4061
4062         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4063                                   MTL_MAX_TX_QUEUES,
4064                                   MTL_MAX_RX_QUEUES);
4065         if (!ndev)
4066                 return -ENOMEM;
4067
4068         SET_NETDEV_DEV(ndev, device);
4069
4070         priv = netdev_priv(ndev);
4071         priv->device = device;
4072         priv->dev = ndev;
4073
4074         stmmac_set_ethtool_ops(ndev);
4075         priv->pause = pause;
4076         priv->plat = plat_dat;
4077         priv->ioaddr = res->addr;
4078         priv->dev->base_addr = (unsigned long)res->addr;
4079
4080         priv->dev->irq = res->irq;
4081         priv->wol_irq = res->wol_irq;
4082         priv->lpi_irq = res->lpi_irq;
4083
4084         if (res->mac)
4085                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4086
4087         dev_set_drvdata(device, priv->dev);
4088
4089         /* Verify driver arguments */
4090         stmmac_verify_args();
4091
4092         /* Override with kernel parameters if supplied. XXX CRS XXX:
4093          * this needs to have multiple instances.
4094          */
4095         if ((phyaddr >= 0) && (phyaddr <= 31))
4096                 priv->plat->phy_addr = phyaddr;
4097
4098         if (priv->plat->stmmac_rst)
4099                 reset_control_deassert(priv->plat->stmmac_rst);
4100
4101         /* Init MAC and get the capabilities */
4102         ret = stmmac_hw_init(priv);
4103         if (ret)
4104                 goto error_hw_init;
4105
4106         /* Configure real RX and TX queues */
4107         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4108         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4109
4110         ndev->netdev_ops = &stmmac_netdev_ops;
4111
4112         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4113                             NETIF_F_RXCSUM;
4114
4115         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4116                 ndev->hw_features |= NETIF_F_TSO;
4117                 priv->tso = true;
4118                 dev_info(priv->device, "TSO feature enabled\n");
4119         }
4120         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4121         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4122 #ifdef STMMAC_VLAN_TAG_USED
4123         /* Both mac100 and gmac support receive VLAN tag detection */
4124         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4125 #endif
4126         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4127
4128         /* MTU range: 46 - hw-specific max */
4129         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4130         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4131                 ndev->max_mtu = JUMBO_LEN;
4132         else
4133                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4134         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4135          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4136          */
4137         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4138             (priv->plat->maxmtu >= ndev->min_mtu))
4139                 ndev->max_mtu = priv->plat->maxmtu;
4140         else if (priv->plat->maxmtu < ndev->min_mtu)
4141                 dev_warn(priv->device,
4142                          "%s: warning: maxmtu has an invalid value (%d)\n",
4143                          __func__, priv->plat->maxmtu);
4144
4145         if (flow_ctrl)
4146                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4147
4148         /* Rx Watchdog is available in cores newer than 3.40.
4149          * In some cases, for example on buggy HW, this feature
4150          * has to be disabled; this can be done by passing the
4151          * riwt_off field from the platform.
4152          */
4153         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4154                 priv->use_riwt = 1;
4155                 dev_info(priv->device,
4156                          "Enable RX Mitigation via HW Watchdog Timer\n");
4157         }
4158
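        /* One NAPI context is registered per RX queue below; the NAPI weight
         * is scaled with the number of RX queues in use
         * (8 * priv->plat->rx_queues_to_use).
         */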
4159         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4160                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4161
4162                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4163                                (8 * priv->plat->rx_queues_to_use));
4164         }
4165
4166         spin_lock_init(&priv->lock);
4167
4168         /* If a specific clk_csr value is passed from the platform,
4169          * the CSR Clock Range selection cannot be changed at
4170          * run-time and is fixed. Otherwise, the driver will try to
4171          * set the MDC clock dynamically according to the actual
4172          * CSR clock input.
4173          */
4174         if (!priv->plat->clk_csr)
4175                 stmmac_clk_csr_set(priv);
4176         else
4177                 priv->clk_csr = priv->plat->clk_csr;
4178
4179         stmmac_check_pcs_mode(priv);
4180
4181         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4182             priv->hw->pcs != STMMAC_PCS_TBI &&
4183             priv->hw->pcs != STMMAC_PCS_RTBI) {
4184                 /* MDIO bus Registration */
4185                 ret = stmmac_mdio_register(ndev);
4186                 if (ret < 0) {
4187                         dev_err(priv->device,
4188                                 "%s: MDIO bus (id: %d) registration failed\n",
4189                                 __func__, priv->plat->bus_id);
4190                         goto error_mdio_register;
4191                 }
4192         }
4193
4194         ret = register_netdev(ndev);
4195         if (ret) {
4196                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4197                         __func__, ret);
4198                 goto error_netdev_register;
4199         }
4200
4201         return ret;
4202
4203 error_netdev_register:
4204         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4205             priv->hw->pcs != STMMAC_PCS_TBI &&
4206             priv->hw->pcs != STMMAC_PCS_RTBI)
4207                 stmmac_mdio_unregister(ndev);
4208 error_mdio_register:
4209         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4210                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4211
4212                 netif_napi_del(&rx_q->napi);
4213         }
4214 error_hw_init:
4215         free_netdev(ndev);
4216
4217         return ret;
4218 }
4219 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
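
/*
 * Usage sketch: a bus glue driver such as stmmac_platform.c or stmmac_pci.c
 * typically fills in a plat_stmmacenet_data and a stmmac_resources structure
 * and then calls stmmac_dvr_probe(), roughly as follows (the helper calls and
 * the "macirq" IRQ name are illustrative; the exact setup lives in the glue
 * drivers):
 *
 *      struct stmmac_resources stmmac_res = { 0 };
 *
 *      stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
 *      stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
 *      ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 */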
4220
4221 /**
4222  * stmmac_dvr_remove
4223  * @dev: device pointer
4224  * Description: this function resets the TX/RX processes, disables the MAC
4225  * RX/TX, changes the link status and releases the DMA descriptor rings.
4226  */
4227 int stmmac_dvr_remove(struct device *dev)
4228 {
4229         struct net_device *ndev = dev_get_drvdata(dev);
4230         struct stmmac_priv *priv = netdev_priv(ndev);
4231
4232         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4233
4234         stmmac_stop_all_dma(priv);
4235
4236         priv->hw->mac->set_mac(priv->ioaddr, false);
4237         netif_carrier_off(ndev);
4238         unregister_netdev(ndev);
4239         if (priv->plat->stmmac_rst)
4240                 reset_control_assert(priv->plat->stmmac_rst);
4241         clk_disable_unprepare(priv->plat->pclk);
4242         clk_disable_unprepare(priv->plat->stmmac_clk);
4243         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4244             priv->hw->pcs != STMMAC_PCS_TBI &&
4245             priv->hw->pcs != STMMAC_PCS_RTBI)
4246                 stmmac_mdio_unregister(ndev);
4247         free_netdev(ndev);
4248
4249         return 0;
4250 }
4251 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
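
/*
 * Usage sketch: a glue driver's .remove callback typically just forwards to
 * stmmac_dvr_remove(); the function name below is illustrative, and the real
 * glue code may also release platform-specific resources:
 *
 *      static int stmmac_pltfr_remove(struct platform_device *pdev)
 *      {
 *              return stmmac_dvr_remove(&pdev->dev);
 *      }
 */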
4252
4253 /**
4254  * stmmac_suspend - suspend callback
4255  * @dev: device pointer
4256  * Description: this is the function to suspend the device and it is called
4257  * by the platform driver to stop the network queue, release the resources,
4258  * program the PMT register (for WoL), clean and release driver resources.
4259  */
4260 int stmmac_suspend(struct device *dev)
4261 {
4262         struct net_device *ndev = dev_get_drvdata(dev);
4263         struct stmmac_priv *priv = netdev_priv(ndev);
4264         unsigned long flags;
4265
4266         if (!ndev || !netif_running(ndev))
4267                 return 0;
4268
4269         if (ndev->phydev)
4270                 phy_stop(ndev->phydev);
4271
4272         spin_lock_irqsave(&priv->lock, flags);
4273
4274         netif_device_detach(ndev);
4275         stmmac_stop_all_queues(priv);
4276
4277         stmmac_disable_all_queues(priv);
4278
4279         /* Stop TX/RX DMA */
4280         stmmac_stop_all_dma(priv);
4281
4282         /* Enable Power down mode by programming the PMT regs */
4283         if (device_may_wakeup(priv->device)) {
4284                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4285                 priv->irq_wake = 1;
4286         } else {
4287                 priv->hw->mac->set_mac(priv->ioaddr, false);
4288                 pinctrl_pm_select_sleep_state(priv->device);
4289                 /* Disable clocks since PM wakeup is not used */
4290                 clk_disable(priv->plat->pclk);
4291                 clk_disable(priv->plat->stmmac_clk);
4292         }
4293         spin_unlock_irqrestore(&priv->lock, flags);
4294
4295         priv->oldlink = 0;
4296         priv->speed = SPEED_UNKNOWN;
4297         priv->oldduplex = DUPLEX_UNKNOWN;
4298         return 0;
4299 }
4300 EXPORT_SYMBOL_GPL(stmmac_suspend);
4301
4302 /**
4303  * stmmac_reset_queues_param - reset queue parameters
4304  * @dev: device pointer
4305  */
4306 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4307 {
4308         u32 rx_cnt = priv->plat->rx_queues_to_use;
4309         u32 tx_cnt = priv->plat->tx_queues_to_use;
4310         u32 queue;
4311
4312         for (queue = 0; queue < rx_cnt; queue++) {
4313                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4314
4315                 rx_q->cur_rx = 0;
4316                 rx_q->dirty_rx = 0;
4317         }
4318
4319         for (queue = 0; queue < tx_cnt; queue++) {
4320                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4321
4322                 tx_q->cur_tx = 0;
4323                 tx_q->dirty_tx = 0;
4324         }
4325 }
4326
4327 /**
4328  * stmmac_resume - resume callback
4329  * @dev: device pointer
4330  * Description: on resume this function is invoked to set up the DMA and CORE
4331  * in a usable state.
4332  */
4333 int stmmac_resume(struct device *dev)
4334 {
4335         struct net_device *ndev = dev_get_drvdata(dev);
4336         struct stmmac_priv *priv = netdev_priv(ndev);
4337         unsigned long flags;
4338
4339         if (!netif_running(ndev))
4340                 return 0;
4341
4342         /* The Power Down bit in the PMT register is cleared
4343          * automatically as soon as a magic packet or a Wake-up frame
4344          * is received. Even so, it is better to clear this bit
4345          * manually because it can cause problems when resuming
4346          * from other devices (e.g. the serial console).
4347          */
4348         if (device_may_wakeup(priv->device)) {
4349                 spin_lock_irqsave(&priv->lock, flags);
4350                 priv->hw->mac->pmt(priv->hw, 0);
4351                 spin_unlock_irqrestore(&priv->lock, flags);
4352                 priv->irq_wake = 0;
4353         } else {
4354                 pinctrl_pm_select_default_state(priv->device);
4355                 /* enable the clk previously disabled */
4356                 clk_enable(priv->plat->stmmac_clk);
4357                 clk_enable(priv->plat->pclk);
4358                 /* reset the phy so that it's ready */
4359                 if (priv->mii)
4360                         stmmac_mdio_reset(priv->mii);
4361         }
4362
4363         netif_device_attach(ndev);
4364
4365         spin_lock_irqsave(&priv->lock, flags);
4366
4367         stmmac_reset_queues_param(priv);
4368
4369         /* reset private mss value to force mss context settings at
4370          * next tso xmit (only used for gmac4).
4371          */
4372         priv->mss = 0;
4373
4374         stmmac_clear_descriptors(priv);
4375
4376         stmmac_hw_setup(ndev, false);
4377         stmmac_init_tx_coalesce(priv);
4378         stmmac_set_rx_mode(ndev);
4379
4380         stmmac_enable_all_queues(priv);
4381
4382         stmmac_start_all_queues(priv);
4383
4384         spin_unlock_irqrestore(&priv->lock, flags);
4385
4386         if (ndev->phydev)
4387                 phy_start(ndev->phydev);
4388
4389         return 0;
4390 }
4391 EXPORT_SYMBOL_GPL(stmmac_resume);
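
/*
 * Usage sketch: stmmac_suspend()/stmmac_resume() are normally wired into a
 * dev_pm_ops structure by the glue driver so the PM core calls them on system
 * sleep transitions; a minimal form (the real glue may add its own wrappers)
 * would be:
 *
 *      static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 */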
4392
4393 #ifndef MODULE
4394 static int __init stmmac_cmdline_opt(char *str)
4395 {
4396         char *opt;
4397
4398         if (!str || !*str)
4399                 return -EINVAL;
4400         while ((opt = strsep(&str, ",")) != NULL) {
4401                 if (!strncmp(opt, "debug:", 6)) {
4402                         if (kstrtoint(opt + 6, 0, &debug))
4403                                 goto err;
4404                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4405                         if (kstrtoint(opt + 8, 0, &phyaddr))
4406                                 goto err;
4407                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4408                         if (kstrtoint(opt + 7, 0, &buf_sz))
4409                                 goto err;
4410                 } else if (!strncmp(opt, "tc:", 3)) {
4411                         if (kstrtoint(opt + 3, 0, &tc))
4412                                 goto err;
4413                 } else if (!strncmp(opt, "watchdog:", 9)) {
4414                         if (kstrtoint(opt + 9, 0, &watchdog))
4415                                 goto err;
4416                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4417                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4418                                 goto err;
4419                 } else if (!strncmp(opt, "pause:", 6)) {
4420                         if (kstrtoint(opt + 6, 0, &pause))
4421                                 goto err;
4422                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4423                         if (kstrtoint(opt + 10, 0, &eee_timer))
4424                                 goto err;
4425                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4426                         if (kstrtoint(opt + 11, 0, &chain_mode))
4427                                 goto err;
4428                 }
4429         }
4430         return 0;
4431
4432 err:
4433         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4434         return -EINVAL;
4435 }
4436
4437 __setup("stmmaceth=", stmmac_cmdline_opt);
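
/*
 * Example: with the driver built in, the options parsed above can be passed
 * on the kernel command line as comma-separated "key:value" pairs, e.g.:
 *
 *      stmmaceth=debug:16,phyaddr:1,watchdog:5000,buf_sz:2048
 */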
4438 #endif /* MODULE */
4439
4440 static int __init stmmac_init(void)
4441 {
4442 #ifdef CONFIG_DEBUG_FS
4443         /* Create debugfs main directory if it doesn't exist yet */
4444         if (!stmmac_fs_dir) {
4445                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4446
4447                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4448                         pr_err("ERROR %s, debugfs create directory failed\n",
4449                                STMMAC_RESOURCE_NAME);
4450
4451                         return -ENOMEM;
4452                 }
4453         }
4454 #endif
4455
4456         return 0;
4457 }
4458
4459 static void __exit stmmac_exit(void)
4460 {
4461 #ifdef CONFIG_DEBUG_FS
4462         debugfs_remove_recursive(stmmac_fs_dir);
4463 #endif
4464 }
4465
4466 module_init(stmmac_init)
4467 module_exit(stmmac_exit)
4468
4469 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4470 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4471 MODULE_LICENSE("GPL");