net: stmmac: enable/disable dma irq prepared for multiple queues
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx
105  * descriptors, but the user can force the use of chain mode instead.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
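/* A usage sketch (the module name "stmmac" and the values are illustrative):
 * the parameters above can be set at load time, e.g.
 *
 *	modprobe stmmac buf_sz=4096 eee_timer=2000 chain_mode=1
 *
 * and those registered with S_IWUSR can also be changed at run time via
 * /sys/module/stmmac/parameters/<name>.
 */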
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets defaults in case
123  * of errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_clk_csr_set - dynamically set the MDC clock
143  * @priv: driver private structure
144  * Description: this is to dynamically set the MDC clock according to the csr
145  * clock input.
146  * Note:
147  *      If a specific clk_csr value is passed from the platform,
148  *      the CSR Clock Range selection cannot be changed at run-time
149  *      and is fixed (as reported in the driver documentation).
150  *      Otherwise the driver will try to set the MDC clock dynamically
151  *      according to the actual clock input.
152  */
153 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
154 {
155         u32 clk_rate;
156
157         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
158
159         /* The platform-provided default clk_csr is assumed to be valid
160          * in all cases except the ones mentioned below. For values
161          * higher than the IEEE 802.3 specified frequency we cannot
162          * estimate the proper divider, as the clk_csr_i frequency is
163          * not known. So in that case we do not change the default
164          * divider.
165          */
166         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
167                 if (clk_rate < CSR_F_35M)
168                         priv->clk_csr = STMMAC_CSR_20_35M;
169                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
170                         priv->clk_csr = STMMAC_CSR_35_60M;
171                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
172                         priv->clk_csr = STMMAC_CSR_60_100M;
173                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
174                         priv->clk_csr = STMMAC_CSR_100_150M;
175                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
176                         priv->clk_csr = STMMAC_CSR_150_250M;
177                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
178                         priv->clk_csr = STMMAC_CSR_250_300M;
179         }
180 }
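/* For example, a 125 MHz csr clock (an illustrative value) falls in the
 * 100-150 MHz range above and selects STMMAC_CSR_100_150M.
 */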
181
182 static void print_pkt(unsigned char *buf, int len)
183 {
184         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
185         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
186 }
187
188 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
189 {
190         u32 avail;
191
192         if (priv->dirty_tx > priv->cur_tx)
193                 avail = priv->dirty_tx - priv->cur_tx - 1;
194         else
195                 avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
196
197         return avail;
198 }
199
200 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
201 {
202         u32 dirty;
203
204         if (priv->dirty_rx <= priv->cur_rx)
205                 dirty = priv->cur_rx - priv->dirty_rx;
206         else
207                 dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
208
209         return dirty;
210 }
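/* A worked example of the ring arithmetic above (values are illustrative):
 * with DMA_TX_SIZE = 256, cur_tx = 250 and dirty_tx = 10, the ring has
 * wrapped, so stmmac_tx_avail() returns 256 - 250 + 10 - 1 = 15 free
 * entries; one slot is always kept unused to distinguish a full ring from
 * an empty one. Likewise, with DMA_RX_SIZE = 256, cur_rx = 5 and
 * dirty_rx = 250, stmmac_rx_dirty() returns 256 - 250 + 5 = 11 entries
 * waiting to be refilled.
 */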
211
212 /**
213  * stmmac_hw_fix_mac_speed - callback for speed selection
214  * @priv: driver private structure
215  * Description: on some platforms (e.g. ST), some HW system configuration
216  * registers have to be set according to the link speed negotiated.
217  */
218 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
219 {
220         struct net_device *ndev = priv->dev;
221         struct phy_device *phydev = ndev->phydev;
222
223         if (likely(priv->plat->fix_mac_speed))
224                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
225 }
226
227 /**
228  * stmmac_enable_eee_mode - check and enter LPI mode
229  * @priv: driver private structure
230  * Description: this function verifies that no transmission is pending and
231  * then enters LPI mode, in case of EEE.
232  */
233 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
234 {
235         /* Check and enter in LPI mode */
236         if ((priv->dirty_tx == priv->cur_tx) &&
237             (priv->tx_path_in_lpi_mode == false))
238                 priv->hw->mac->set_eee_mode(priv->hw,
239                                             priv->plat->en_tx_lpi_clockgating);
240 }
241
242 /**
243  * stmmac_disable_eee_mode - disable and exit from LPI mode
244  * @priv: driver private structure
245  * Description: this function exits and disables EEE when the LPI state
246  * is active. It is called from the xmit path.
247  */
248 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
249 {
250         priv->hw->mac->reset_eee_mode(priv->hw);
251         del_timer_sync(&priv->eee_ctrl_timer);
252         priv->tx_path_in_lpi_mode = false;
253 }
254
255 /**
256  * stmmac_eee_ctrl_timer - EEE TX SW timer.
257  * @arg : data hook
258  * Description:
259  *  If there is no data transfer and we are not already in LPI state,
260  *  then the MAC transmitter can be moved to LPI state.
261  */
262 static void stmmac_eee_ctrl_timer(unsigned long arg)
263 {
264         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
265
266         stmmac_enable_eee_mode(priv);
267         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
268 }
269
270 /**
271  * stmmac_eee_init - init EEE
272  * @priv: driver private structure
273  * Description:
274  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
275  *  can also manage EEE, this function enables the LPI state and starts
276  *  the related timer.
277  */
278 bool stmmac_eee_init(struct stmmac_priv *priv)
279 {
280         struct net_device *ndev = priv->dev;
281         unsigned long flags;
282         bool ret = false;
283
284         /* When using PCS we cannot access the phy registers at this
285          * stage, so we do not support extra features like EEE.
286          */
287         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
288             (priv->hw->pcs == STMMAC_PCS_TBI) ||
289             (priv->hw->pcs == STMMAC_PCS_RTBI))
290                 goto out;
291
292         /* MAC core supports the EEE feature. */
293         if (priv->dma_cap.eee) {
294                 int tx_lpi_timer = priv->tx_lpi_timer;
295
296                 /* Check if the PHY supports EEE */
297                 if (phy_init_eee(ndev->phydev, 1)) {
298                         /* Handle the case where EEE can no longer be
299                          * supported at run-time (for example because the
300                          * link partner caps have changed); in that case
301                          * the driver disables its own timers.
302                          */
303                         spin_lock_irqsave(&priv->lock, flags);
304                         if (priv->eee_active) {
305                                 netdev_dbg(priv->dev, "disable EEE\n");
306                                 del_timer_sync(&priv->eee_ctrl_timer);
307                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
308                                                              tx_lpi_timer);
309                         }
310                         priv->eee_active = 0;
311                         spin_unlock_irqrestore(&priv->lock, flags);
312                         goto out;
313                 }
314                 /* Activate the EEE and start timers */
315                 spin_lock_irqsave(&priv->lock, flags);
316                 if (!priv->eee_active) {
317                         priv->eee_active = 1;
318                         setup_timer(&priv->eee_ctrl_timer,
319                                     stmmac_eee_ctrl_timer,
320                                     (unsigned long)priv);
321                         mod_timer(&priv->eee_ctrl_timer,
322                                   STMMAC_LPI_T(eee_timer));
323
324                         priv->hw->mac->set_eee_timer(priv->hw,
325                                                      STMMAC_DEFAULT_LIT_LS,
326                                                      tx_lpi_timer);
327                 }
328                 /* Set HW EEE according to the speed */
329                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
330
331                 ret = true;
332                 spin_unlock_irqrestore(&priv->lock, flags);
333
334                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
335         }
336 out:
337         return ret;
338 }
339
340 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
341  * @priv: driver private structure
342  * @p : descriptor pointer
343  * @skb : the socket buffer
344  * Description :
345  * This function reads the timestamp from the descriptor, performs some
346  * sanity checks and then passes it to the stack.
347  */
348 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
349                                    struct dma_desc *p, struct sk_buff *skb)
350 {
351         struct skb_shared_hwtstamps shhwtstamp;
352         u64 ns;
353
354         if (!priv->hwts_tx_en)
355                 return;
356
357         /* exit if skb doesn't support hw tstamp */
358         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
359                 return;
360
361         /* check tx tstamp status */
362         if (!priv->hw->desc->get_tx_timestamp_status(p)) {
363                 /* get the valid tstamp */
364                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
365
366                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
367                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
368
369                 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
370                 /* pass tstamp to stack */
371                 skb_tstamp_tx(skb, &shhwtstamp);
372         }
375 }
376
377 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
378  * @priv: driver private structure
379  * @p : descriptor pointer
380  * @np : next descriptor pointer
381  * @skb : the socket buffer
382  * Description :
383  * This function reads the received packet's timestamp from the descriptor
384  * and passes it to the stack. It also performs some sanity checks.
385  */
386 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
387                                    struct dma_desc *np, struct sk_buff *skb)
388 {
389         struct skb_shared_hwtstamps *shhwtstamp = NULL;
390         u64 ns;
391
392         if (!priv->hwts_rx_en)
393                 return;
394
395         /* Check if timestamp is available */
396         if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
397                 /* For GMAC4, the valid timestamp is from CTX next desc. */
398                 if (priv->plat->has_gmac4)
399                         ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
400                 else
401                         ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
402
403                 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
404                 shhwtstamp = skb_hwtstamps(skb);
405                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
406                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
407         } else  {
408                 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
409         }
410 }
411
412 /**
413  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
414  *  @dev: device pointer.
415  *  @ifr: An IOCTL-specific structure that can contain a pointer to
416  *  a proprietary structure used to pass information to the driver.
417  *  Description:
418  *  This function configures the MAC to enable/disable both outgoing (TX)
419  *  and incoming (RX) packet timestamping based on user input.
420  *  Return Value:
421  *  0 on success and an appropriate negative integer error code on failure.
422  */
423 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
424 {
425         struct stmmac_priv *priv = netdev_priv(dev);
426         struct hwtstamp_config config;
427         struct timespec64 now;
428         u64 temp = 0;
429         u32 ptp_v2 = 0;
430         u32 tstamp_all = 0;
431         u32 ptp_over_ipv4_udp = 0;
432         u32 ptp_over_ipv6_udp = 0;
433         u32 ptp_over_ethernet = 0;
434         u32 snap_type_sel = 0;
435         u32 ts_master_en = 0;
436         u32 ts_event_en = 0;
437         u32 value = 0;
438         u32 sec_inc;
439
440         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
441                 netdev_alert(priv->dev, "No support for HW time stamping\n");
442                 priv->hwts_tx_en = 0;
443                 priv->hwts_rx_en = 0;
444
445                 return -EOPNOTSUPP;
446         }
447
448         if (copy_from_user(&config, ifr->ifr_data,
449                            sizeof(struct hwtstamp_config)))
450                 return -EFAULT;
451
452         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
453                    __func__, config.flags, config.tx_type, config.rx_filter);
454
455         /* reserved for future extensions */
456         if (config.flags)
457                 return -EINVAL;
458
459         if (config.tx_type != HWTSTAMP_TX_OFF &&
460             config.tx_type != HWTSTAMP_TX_ON)
461                 return -ERANGE;
462
463         if (priv->adv_ts) {
464                 switch (config.rx_filter) {
465                 case HWTSTAMP_FILTER_NONE:
466                         /* time stamp no incoming packet at all */
467                         config.rx_filter = HWTSTAMP_FILTER_NONE;
468                         break;
469
470                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
471                         /* PTP v1, UDP, any kind of event packet */
472                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
473                         /* take time stamp for all event messages */
474                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
475
476                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
477                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
478                         break;
479
480                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
481                         /* PTP v1, UDP, Sync packet */
482                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
483                         /* take time stamp for SYNC messages only */
484                         ts_event_en = PTP_TCR_TSEVNTENA;
485
486                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
487                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
488                         break;
489
490                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
491                         /* PTP v1, UDP, Delay_req packet */
492                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
493                         /* take time stamp for Delay_Req messages only */
494                         ts_master_en = PTP_TCR_TSMSTRENA;
495                         ts_event_en = PTP_TCR_TSEVNTENA;
496
497                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
498                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
499                         break;
500
501                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
502                         /* PTP v2, UDP, any kind of event packet */
503                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
504                         ptp_v2 = PTP_TCR_TSVER2ENA;
505                         /* take time stamp for all event messages */
506                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
507
508                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
509                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
510                         break;
511
512                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
513                         /* PTP v2, UDP, Sync packet */
514                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
515                         ptp_v2 = PTP_TCR_TSVER2ENA;
516                         /* take time stamp for SYNC messages only */
517                         ts_event_en = PTP_TCR_TSEVNTENA;
518
519                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
520                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
521                         break;
522
523                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
524                         /* PTP v2, UDP, Delay_req packet */
525                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
526                         ptp_v2 = PTP_TCR_TSVER2ENA;
527                         /* take time stamp for Delay_Req messages only */
528                         ts_master_en = PTP_TCR_TSMSTRENA;
529                         ts_event_en = PTP_TCR_TSEVNTENA;
530
531                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
532                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
533                         break;
534
535                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
536                         /* PTP v2/802.AS1 any layer, any kind of event packet */
537                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
538                         ptp_v2 = PTP_TCR_TSVER2ENA;
539                         /* take time stamp for all event messages */
540                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
541
542                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
543                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
544                         ptp_over_ethernet = PTP_TCR_TSIPENA;
545                         break;
546
547                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
548                         /* PTP v2/802.AS1, any layer, Sync packet */
549                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
550                         ptp_v2 = PTP_TCR_TSVER2ENA;
551                         /* take time stamp for SYNC messages only */
552                         ts_event_en = PTP_TCR_TSEVNTENA;
553
554                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
555                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
556                         ptp_over_ethernet = PTP_TCR_TSIPENA;
557                         break;
558
559                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
560                         /* PTP v2/802.AS1, any layer, Delay_req packet */
561                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
562                         ptp_v2 = PTP_TCR_TSVER2ENA;
563                         /* take time stamp for Delay_Req messages only */
564                         ts_master_en = PTP_TCR_TSMSTRENA;
565                         ts_event_en = PTP_TCR_TSEVNTENA;
566
567                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
568                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
569                         ptp_over_ethernet = PTP_TCR_TSIPENA;
570                         break;
571
572                 case HWTSTAMP_FILTER_ALL:
573                         /* time stamp any incoming packet */
574                         config.rx_filter = HWTSTAMP_FILTER_ALL;
575                         tstamp_all = PTP_TCR_TSENALL;
576                         break;
577
578                 default:
579                         return -ERANGE;
580                 }
581         } else {
582                 switch (config.rx_filter) {
583                 case HWTSTAMP_FILTER_NONE:
584                         config.rx_filter = HWTSTAMP_FILTER_NONE;
585                         break;
586                 default:
587                         /* PTP v1, UDP, any kind of event packet */
588                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
589                         break;
590                 }
591         }
592         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
593         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
594
595         if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
596                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
597         } else {
598                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
599                          tstamp_all | ptp_v2 | ptp_over_ethernet |
600                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
601                          ts_master_en | snap_type_sel);
602                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
603
604                 /* program Sub Second Increment reg */
605                 sec_inc = priv->hw->ptp->config_sub_second_increment(
606                         priv->ptpaddr, priv->plat->clk_ptp_rate,
607                         priv->plat->has_gmac4);
608                 temp = div_u64(1000000000ULL, sec_inc);
609
610                 /* Calculate the default addend value:
611                  * the formula is:
612                  * addend = (2^32) / freq_div_ratio,
613                  * where freq_div_ratio = 1e9 / sec_inc (sec_inc in ns)
614                  */
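		/* A worked numeric example (illustrative values): with
		 * clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns,
		 * freq_div_ratio = 1e9 / 20 = 5e7, so
		 * addend = 2^32 * 5e7 / 62.5e6 = 0.8 * 2^32 ~= 0xCCCCCCCC.
		 * The 32-bit accumulator then overflows every
		 * 2^32 / addend = 1.25 cycles, i.e. every 1.25 * 16 ns = 20 ns,
		 * which matches sec_inc.
		 */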
615                 temp = (u64)(temp << 32);
616                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
617                 priv->hw->ptp->config_addend(priv->ptpaddr,
618                                              priv->default_addend);
619
620                 /* initialize system time */
621                 ktime_get_real_ts64(&now);
622
623                 /* lower 32 bits of tv_sec are safe until y2106 */
624                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
625                                             now.tv_nsec);
626         }
627
628         return copy_to_user(ifr->ifr_data, &config,
629                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
630 }
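/* A minimal userspace sketch of driving the ioctl above via SIOCSHWTSTAMP
 * (illustrative and not part of the driver; the interface name "eth0" and
 * the chosen filter are assumptions):
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	 returns 0 on success
 */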
631
632 /**
633  * stmmac_init_ptp - init PTP
634  * @priv: driver private structure
635  * Description: this is to verify if the HW supports PTPv1 or PTPv2,
636  * by looking at the HW capability register.
637  * This function also registers the ptp driver.
638  */
639 static int stmmac_init_ptp(struct stmmac_priv *priv)
640 {
641         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
642                 return -EOPNOTSUPP;
643
644         priv->adv_ts = 0;
645         /* Check if adv_ts can be enabled for dwmac 4.x core */
646         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
647                 priv->adv_ts = 1;
648         /* Dwmac 3.x core with extend_desc can support adv_ts */
649         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
650                 priv->adv_ts = 1;
651
652         if (priv->dma_cap.time_stamp)
653                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
654
655         if (priv->adv_ts)
656                 netdev_info(priv->dev,
657                             "IEEE 1588-2008 Advanced Timestamp supported\n");
658
659         priv->hw->ptp = &stmmac_ptp;
660         priv->hwts_tx_en = 0;
661         priv->hwts_rx_en = 0;
662
663         stmmac_ptp_register(priv);
664
665         return 0;
666 }
667
668 static void stmmac_release_ptp(struct stmmac_priv *priv)
669 {
670         if (priv->plat->clk_ptp_ref)
671                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
672         stmmac_ptp_unregister(priv);
673 }
674
675 /**
676  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
677  *  @priv: driver private structure
678  *  Description: It is used for configuring the flow control in all queues
679  */
680 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
681 {
682         u32 tx_cnt = priv->plat->tx_queues_to_use;
683
684         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
685                                  priv->pause, tx_cnt);
686 }
687
688 /**
689  * stmmac_adjust_link - adjusts the link parameters
690  * @dev: net device structure
691  * Description: this is the helper called by the physical abstraction layer
692  * drivers to communicate the phy link status. According to the speed and
693  * duplex, this driver can invoke registered glue-logic as well.
694  * It also invokes the EEE initialization because the link could come up
695  * on different networks (that are EEE capable).
696  */
697 static void stmmac_adjust_link(struct net_device *dev)
698 {
699         struct stmmac_priv *priv = netdev_priv(dev);
700         struct phy_device *phydev = dev->phydev;
701         unsigned long flags;
702         int new_state = 0;
703
704         if (!phydev)
705                 return;
706
707         spin_lock_irqsave(&priv->lock, flags);
708
709         if (phydev->link) {
710                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
711
712                 /* Now we make sure that we can be in full duplex mode.
713                  * If not, we operate in half-duplex mode. */
714                 if (phydev->duplex != priv->oldduplex) {
715                         new_state = 1;
716                         if (!(phydev->duplex))
717                                 ctrl &= ~priv->hw->link.duplex;
718                         else
719                                 ctrl |= priv->hw->link.duplex;
720                         priv->oldduplex = phydev->duplex;
721                 }
722                 /* Flow Control operation */
723                 if (phydev->pause)
724                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
725
726                 if (phydev->speed != priv->speed) {
727                         new_state = 1;
728                         switch (phydev->speed) {
729                         case 1000:
730                                 if (priv->plat->has_gmac ||
731                                     priv->plat->has_gmac4)
732                                         ctrl &= ~priv->hw->link.port;
733                                 break;
734                         case 100:
735                                 if (priv->plat->has_gmac ||
736                                     priv->plat->has_gmac4) {
737                                         ctrl |= priv->hw->link.port;
738                                         ctrl |= priv->hw->link.speed;
739                                 } else {
740                                         ctrl &= ~priv->hw->link.port;
741                                 }
742                                 break;
743                         case 10:
744                                 if (priv->plat->has_gmac ||
745                                     priv->plat->has_gmac4) {
746                                         ctrl |= priv->hw->link.port;
747                                         ctrl &= ~(priv->hw->link.speed);
748                                 } else {
749                                         ctrl &= ~priv->hw->link.port;
750                                 }
751                                 break;
752                         default:
753                                 netif_warn(priv, link, priv->dev,
754                                            "broken speed: %d\n", phydev->speed);
755                                 phydev->speed = SPEED_UNKNOWN;
756                                 break;
757                         }
758                         if (phydev->speed != SPEED_UNKNOWN)
759                                 stmmac_hw_fix_mac_speed(priv);
760                         priv->speed = phydev->speed;
761                 }
762
763                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
764
765                 if (!priv->oldlink) {
766                         new_state = 1;
767                         priv->oldlink = 1;
768                 }
769         } else if (priv->oldlink) {
770                 new_state = 1;
771                 priv->oldlink = 0;
772                 priv->speed = SPEED_UNKNOWN;
773                 priv->oldduplex = DUPLEX_UNKNOWN;
774         }
775
776         if (new_state && netif_msg_link(priv))
777                 phy_print_status(phydev);
778
779         spin_unlock_irqrestore(&priv->lock, flags);
780
781         if (phydev->is_pseudo_fixed_link)
782                 /* Stop the PHY layer from calling the hook to adjust the
783                  * link in case a switch is attached to the stmmac driver.
784                  */
785                 phydev->irq = PHY_IGNORE_INTERRUPT;
786         else
787                 /* At this stage, init the EEE if supported.
788                  * Never called in case of fixed_link.
789                  */
790                 priv->eee_enabled = stmmac_eee_init(priv);
791 }
792
793 /**
794  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
795  * @priv: driver private structure
796  * Description: this is to verify if the HW supports the Physical Coding
797  * Sublayer (PCS) interface, which can be used when the MAC is configured
798  * for the TBI, RTBI, or SGMII PHY interface.
799  */
800 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
801 {
802         int interface = priv->plat->interface;
803
804         if (priv->dma_cap.pcs) {
805                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
806                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
807                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
808                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
809                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
810                         priv->hw->pcs = STMMAC_PCS_RGMII;
811                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
812                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
813                         priv->hw->pcs = STMMAC_PCS_SGMII;
814                 }
815         }
816 }
817
818 /**
819  * stmmac_init_phy - PHY initialization
820  * @dev: net device structure
821  * Description: it initializes the driver's PHY state, and attaches the PHY
822  * to the mac driver.
823  *  Return value:
824  *  0 on success
825  */
826 static int stmmac_init_phy(struct net_device *dev)
827 {
828         struct stmmac_priv *priv = netdev_priv(dev);
829         struct phy_device *phydev;
830         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
831         char bus_id[MII_BUS_ID_SIZE];
832         int interface = priv->plat->interface;
833         int max_speed = priv->plat->max_speed;

834         priv->oldlink = 0;
835         priv->speed = SPEED_UNKNOWN;
836         priv->oldduplex = DUPLEX_UNKNOWN;
837
838         if (priv->plat->phy_node) {
839                 phydev = of_phy_connect(dev, priv->plat->phy_node,
840                                         &stmmac_adjust_link, 0, interface);
841         } else {
842                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
843                          priv->plat->bus_id);
844
845                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
846                          priv->plat->phy_addr);
847                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
848                            phy_id_fmt);
849
850                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
851                                      interface);
852         }
853
854         if (IS_ERR_OR_NULL(phydev)) {
855                 netdev_err(priv->dev, "Could not attach to PHY\n");
856                 if (!phydev)
857                         return -ENODEV;
858
859                 return PTR_ERR(phydev);
860         }
861
862         /* Stop Advertising 1000BASE Capability if interface is not GMII */
863         if ((interface == PHY_INTERFACE_MODE_MII) ||
864             (interface == PHY_INTERFACE_MODE_RMII) ||
865             (max_speed < 1000 && max_speed > 0))
866                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
867                                          SUPPORTED_1000baseT_Full);
868
869         /*
870          * Broken HW is sometimes missing the pull-up resistor on the
871          * MDIO line, which results in reads to non-existent devices returning
872          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
873          * device as well.
874          * Note: phydev->phy_id is the result of reading the UID PHY registers.
875          */
876         if (!priv->plat->phy_node && phydev->phy_id == 0) {
877                 phy_disconnect(phydev);
878                 return -ENODEV;
879         }
880
881         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
882          * subsequent PHY polling, make sure we force a link transition if
883          * we have a UP/DOWN/UP transition
884          */
885         if (phydev->is_pseudo_fixed_link)
886                 phydev->irq = PHY_POLL;
887
888         phy_attached_info(phydev);
889         return 0;
890 }
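/* For example (illustrative values): with PHY_ID_FMT being "%s:%02x",
 * bus_id 0 and phy_addr 1, the code above tries to attach to the PHY
 * named "stmmac-0:01".
 */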
891
892 static void stmmac_display_rings(struct stmmac_priv *priv)
893 {
894         void *head_rx, *head_tx;
895
896         if (priv->extend_desc) {
897                 head_rx = (void *)priv->dma_erx;
898                 head_tx = (void *)priv->dma_etx;
899         } else {
900                 head_rx = (void *)priv->dma_rx;
901                 head_tx = (void *)priv->dma_tx;
902         }
903
904         /* Display Rx ring */
905         priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
906         /* Display Tx ring */
907         priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
908 }
909
910 static int stmmac_set_bfsize(int mtu, int bufsize)
911 {
912         int ret = bufsize;
913
914         if (mtu >= BUF_SIZE_4KiB)
915                 ret = BUF_SIZE_8KiB;
916         else if (mtu >= BUF_SIZE_2KiB)
917                 ret = BUF_SIZE_4KiB;
918         else if (mtu > DEFAULT_BUFSIZE)
919                 ret = BUF_SIZE_2KiB;
920         else
921                 ret = DEFAULT_BUFSIZE;
922
923         return ret;
924 }
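/* For example, stmmac_set_bfsize() maps a standard 1500-byte MTU to
 * DEFAULT_BUFSIZE (1536), a 1600-byte MTU to BUF_SIZE_2KiB, a 3000-byte
 * MTU to BUF_SIZE_4KiB and a 9000-byte jumbo MTU to BUF_SIZE_8KiB.
 */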
925
926 /**
927  * stmmac_clear_descriptors - clear descriptors
928  * @priv: driver private structure
929  * Description: this function is called to clear the tx and rx descriptors,
930  * whether basic or extended descriptors are in use.
931  */
932 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
933 {
934         int i;
935
936         /* Clear the Rx/Tx descriptors */
937         for (i = 0; i < DMA_RX_SIZE; i++)
938                 if (priv->extend_desc)
939                         priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
940                                                      priv->use_riwt, priv->mode,
941                                                      (i == DMA_RX_SIZE - 1));
942                 else
943                         priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
944                                                      priv->use_riwt, priv->mode,
945                                                      (i == DMA_RX_SIZE - 1));
946         for (i = 0; i < DMA_TX_SIZE; i++)
947                 if (priv->extend_desc)
948                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
949                                                      priv->mode,
950                                                      (i == DMA_TX_SIZE - 1));
951                 else
952                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
953                                                      priv->mode,
954                                                      (i == DMA_TX_SIZE - 1));
955 }
956
957 /**
958  * stmmac_init_rx_buffers - init the RX descriptor buffer.
959  * @priv: driver private structure
960  * @p: descriptor pointer
961  * @i: descriptor index
962  * @flags: gfp flag.
963  * Description: this function is called to allocate a receive buffer, perform
964  * the DMA mapping and init the descriptor.
965  */
966 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
967                                   int i, gfp_t flags)
968 {
969         struct sk_buff *skb;
970
971         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
972         if (!skb) {
973                 netdev_err(priv->dev,
974                            "%s: Rx init fails; skb is NULL\n", __func__);
975                 return -ENOMEM;
976         }
977         priv->rx_skbuff[i] = skb;
978         priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
979                                                 priv->dma_buf_sz,
980                                                 DMA_FROM_DEVICE);
981         if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
982                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
983                 dev_kfree_skb_any(skb);
984                 return -EINVAL;
985         }
986
987         if (priv->synopsys_id >= DWMAC_CORE_4_00)
988                 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
989         else
990                 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
991
992         if ((priv->hw->mode->init_desc3) &&
993             (priv->dma_buf_sz == BUF_SIZE_16KiB))
994                 priv->hw->mode->init_desc3(p);
995
996         return 0;
997 }
998
999 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
1000 {
1001         if (priv->rx_skbuff[i]) {
1002                 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
1003                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1004                 dev_kfree_skb_any(priv->rx_skbuff[i]);
1005         }
1006         priv->rx_skbuff[i] = NULL;
1007 }
1008
1009 /**
1010  * init_dma_desc_rings - init the RX/TX descriptor rings
1011  * @dev: net device structure
1012  * @flags: gfp flag.
1013  * Description: this function initializes the DMA RX/TX descriptors
1014  * and allocates the socket buffers. It supports the chained and ring
1015  * modes.
1016  */
1017 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1018 {
1019         int i;
1020         struct stmmac_priv *priv = netdev_priv(dev);
1021         unsigned int bfsize = 0;
1022         int ret = -ENOMEM;
1023
1024         if (priv->hw->mode->set_16kib_bfsize)
1025                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1026
1027         if (bfsize < BUF_SIZE_16KiB)
1028                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1029
1030         priv->dma_buf_sz = bfsize;
1031
1032         netif_dbg(priv, probe, priv->dev,
1033                   "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
1034                   __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
1035
1036         /* RX INITIALIZATION */
1037         netif_dbg(priv, probe, priv->dev,
1038                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1039
1040         for (i = 0; i < DMA_RX_SIZE; i++) {
1041                 struct dma_desc *p;
1042                 if (priv->extend_desc)
1043                         p = &((priv->dma_erx + i)->basic);
1044                 else
1045                         p = priv->dma_rx + i;
1046
1047                 ret = stmmac_init_rx_buffers(priv, p, i, flags);
1048                 if (ret)
1049                         goto err_init_rx_buffers;
1050
1051                 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1052                           priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
1053                           (unsigned int)priv->rx_skbuff_dma[i]);
1054         }
1055         priv->cur_rx = 0;
1056         priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1057         buf_sz = bfsize;
1058
1059         /* Setup the chained descriptor addresses */
1060         if (priv->mode == STMMAC_CHAIN_MODE) {
1061                 if (priv->extend_desc) {
1062                         priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
1063                                              DMA_RX_SIZE, 1);
1064                         priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
1065                                              DMA_TX_SIZE, 1);
1066                 } else {
1067                         priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
1068                                              DMA_RX_SIZE, 0);
1069                         priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
1070                                              DMA_TX_SIZE, 0);
1071                 }
1072         }
1073
1074         /* TX INITIALIZATION */
1075         for (i = 0; i < DMA_TX_SIZE; i++) {
1076                 struct dma_desc *p;
1077                 if (priv->extend_desc)
1078                         p = &((priv->dma_etx + i)->basic);
1079                 else
1080                         p = priv->dma_tx + i;
1081
1082                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1083                         p->des0 = 0;
1084                         p->des1 = 0;
1085                         p->des2 = 0;
1086                         p->des3 = 0;
1087                 } else {
1088                         p->des2 = 0;
1089                 }
1090
1091                 priv->tx_skbuff_dma[i].buf = 0;
1092                 priv->tx_skbuff_dma[i].map_as_page = false;
1093                 priv->tx_skbuff_dma[i].len = 0;
1094                 priv->tx_skbuff_dma[i].last_segment = false;
1095                 priv->tx_skbuff[i] = NULL;
1096         }
1097
1098         priv->dirty_tx = 0;
1099         priv->cur_tx = 0;
1100         netdev_reset_queue(priv->dev);
1101
1102         stmmac_clear_descriptors(priv);
1103
1104         if (netif_msg_hw(priv))
1105                 stmmac_display_rings(priv);
1106
1107         return 0;
1108 err_init_rx_buffers:
1109         while (--i >= 0)
1110                 stmmac_free_rx_buffers(priv, i);
1111         return ret;
1112 }
1113
1114 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1115 {
1116         int i;
1117
1118         for (i = 0; i < DMA_RX_SIZE; i++)
1119                 stmmac_free_rx_buffers(priv, i);
1120 }
1121
1122 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1123 {
1124         int i;
1125
1126         for (i = 0; i < DMA_TX_SIZE; i++) {
1127                 if (priv->tx_skbuff_dma[i].buf) {
1128                         if (priv->tx_skbuff_dma[i].map_as_page)
1129                                 dma_unmap_page(priv->device,
1130                                                priv->tx_skbuff_dma[i].buf,
1131                                                priv->tx_skbuff_dma[i].len,
1132                                                DMA_TO_DEVICE);
1133                         else
1134                                 dma_unmap_single(priv->device,
1135                                                  priv->tx_skbuff_dma[i].buf,
1136                                                  priv->tx_skbuff_dma[i].len,
1137                                                  DMA_TO_DEVICE);
1138                 }
1139
1140                 if (priv->tx_skbuff[i]) {
1141                         dev_kfree_skb_any(priv->tx_skbuff[i]);
1142                         priv->tx_skbuff[i] = NULL;
1143                         priv->tx_skbuff_dma[i].buf = 0;
1144                         priv->tx_skbuff_dma[i].map_as_page = false;
1145                 }
1146         }
1147 }
1148
1149 /**
1150  * alloc_dma_desc_resources - alloc TX/RX resources.
1151  * @priv: private structure
1152  * Description: according to which descriptor type can be used (extended or
1153  * basic), this function allocates the resources for the TX and RX paths.
1154  * In case of reception, for example, it pre-allocates the RX socket buffers
1155  * in order to allow a zero-copy mechanism.
1156  */
1157 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1158 {
1159         int ret = -ENOMEM;
1160
1161         priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
1162                                             GFP_KERNEL);
1163         if (!priv->rx_skbuff_dma)
1164                 return -ENOMEM;
1165
1166         priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
1167                                         GFP_KERNEL);
1168         if (!priv->rx_skbuff)
1169                 goto err_rx_skbuff;
1170
1171         priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1172                                             sizeof(*priv->tx_skbuff_dma),
1173                                             GFP_KERNEL);
1174         if (!priv->tx_skbuff_dma)
1175                 goto err_tx_skbuff_dma;
1176
1177         priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
1178                                         GFP_KERNEL);
1179         if (!priv->tx_skbuff)
1180                 goto err_tx_skbuff;
1181
1182         if (priv->extend_desc) {
1183                 priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1184                                                     sizeof(struct
1185                                                            dma_extended_desc),
1186                                                     &priv->dma_rx_phy,
1187                                                     GFP_KERNEL);
1188                 if (!priv->dma_erx)
1189                         goto err_dma;
1190
1191                 priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
1192                                                     sizeof(struct
1193                                                            dma_extended_desc),
1194                                                     &priv->dma_tx_phy,
1195                                                     GFP_KERNEL);
1196                 if (!priv->dma_etx) {
1197                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1198                                           sizeof(struct dma_extended_desc),
1199                                           priv->dma_erx, priv->dma_rx_phy);
1200                         goto err_dma;
1201                 }
1202         } else {
1203                 priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1204                                                    sizeof(struct dma_desc),
1205                                                    &priv->dma_rx_phy,
1206                                                    GFP_KERNEL);
1207                 if (!priv->dma_rx)
1208                         goto err_dma;
1209
1210                 priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
1211                                                    sizeof(struct dma_desc),
1212                                                    &priv->dma_tx_phy,
1213                                                    GFP_KERNEL);
1214                 if (!priv->dma_tx) {
1215                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1216                                           sizeof(struct dma_desc),
1217                                           priv->dma_rx, priv->dma_rx_phy);
1218                         goto err_dma;
1219                 }
1220         }
1221
1222         return 0;
1223
1224 err_dma:
1225         kfree(priv->tx_skbuff);
1226 err_tx_skbuff:
1227         kfree(priv->tx_skbuff_dma);
1228 err_tx_skbuff_dma:
1229         kfree(priv->rx_skbuff);
1230 err_rx_skbuff:
1231         kfree(priv->rx_skbuff_dma);
1232         return ret;
1233 }
1234
1235 static void free_dma_desc_resources(struct stmmac_priv *priv)
1236 {
1237         /* Release the DMA TX/RX socket buffers */
1238         dma_free_rx_skbufs(priv);
1239         dma_free_tx_skbufs(priv);
1240
1241         /* Free DMA regions of consistent memory previously allocated */
1242         if (!priv->extend_desc) {
1243                 dma_free_coherent(priv->device,
1244                                   DMA_TX_SIZE * sizeof(struct dma_desc),
1245                                   priv->dma_tx, priv->dma_tx_phy);
1246                 dma_free_coherent(priv->device,
1247                                   DMA_RX_SIZE * sizeof(struct dma_desc),
1248                                   priv->dma_rx, priv->dma_rx_phy);
1249         } else {
1250                 dma_free_coherent(priv->device, DMA_TX_SIZE *
1251                                   sizeof(struct dma_extended_desc),
1252                                   priv->dma_etx, priv->dma_tx_phy);
1253                 dma_free_coherent(priv->device, DMA_RX_SIZE *
1254                                   sizeof(struct dma_extended_desc),
1255                                   priv->dma_erx, priv->dma_rx_phy);
1256         }
1257         kfree(priv->rx_skbuff_dma);
1258         kfree(priv->rx_skbuff);
1259         kfree(priv->tx_skbuff_dma);
1260         kfree(priv->tx_skbuff);
1261 }
1262
1263 /**
1264  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1265  *  @priv: driver private structure
1266  *  Description: It is used for enabling the rx queues in the MAC
1267  */
1268 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1269 {
1270         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1271         int queue;
1272         u8 mode;
1273
1274         for (queue = 0; queue < rx_queues_count; queue++) {
1275                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1276                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1277         }
1278 }
1279
1280 /**
1281  *  stmmac_dma_operation_mode - HW DMA operation mode
1282  *  @priv: driver private structure
1283  *  Description: it is used for configuring the DMA operation mode register in
1284  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1285  */
1286 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1287 {
1288         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1289         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1290         int rxfifosz = priv->plat->rx_fifo_size;
1291         u32 txmode = 0;
1292         u32 rxmode = 0;
1293         u32 chan = 0;
1294
1295         if (rxfifosz == 0)
1296                 rxfifosz = priv->dma_cap.rx_fifo_size;
1297
1298         if (priv->plat->force_thresh_dma_mode) {
1299                 txmode = tc;
1300                 rxmode = tc;
1301         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1302                 /*
1303                  * In case of GMAC, SF mode can be enabled
1304                  * to perform the TX COE in HW. This depends on:
1305                  * 1) TX COE being actually supported;
1306                  * 2) there being no buggy Jumbo frame support
1307                  *    that requires not inserting the csum in the TDES.
1308                  */
1309                 txmode = SF_DMA_MODE;
1310                 rxmode = SF_DMA_MODE;
1311                 priv->xstats.threshold = SF_DMA_MODE;
1312         } else {
1313                 txmode = tc;
1314                 rxmode = SF_DMA_MODE;
1315         }
1316
1317         /* configure all channels */
1318         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1319                 for (chan = 0; chan < rx_channels_count; chan++)
1320                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1321                                                    rxfifosz);
1322
1323                 for (chan = 0; chan < tx_channels_count; chan++)
1324                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1325         } else {
1326                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1327                                         rxfifosz);
1328         }
1329 }
1330
1331 /**
1332  * stmmac_tx_clean - to manage the transmission completion
1333  * @priv: driver private structure
1334  * Description: it reclaims the transmit resources after transmission completes.
1335  */
1336 static void stmmac_tx_clean(struct stmmac_priv *priv)
1337 {
1338         unsigned int bytes_compl = 0, pkts_compl = 0;
1339         unsigned int entry = priv->dirty_tx;
1340
1341         netif_tx_lock(priv->dev);
1342
1343         priv->xstats.tx_clean++;
1344
1345         while (entry != priv->cur_tx) {
1346                 struct sk_buff *skb = priv->tx_skbuff[entry];
1347                 struct dma_desc *p;
1348                 int status;
1349
1350                 if (priv->extend_desc)
1351                         p = (struct dma_desc *)(priv->dma_etx + entry);
1352                 else
1353                         p = priv->dma_tx + entry;
1354
1355                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1356                                                    &priv->xstats, p,
1357                                                    priv->ioaddr);
1358                 /* Check if the descriptor is owned by the DMA */
1359                 if (unlikely(status & tx_dma_own))
1360                         break;
1361
1362                 /* Just consider the last segment and ...*/
1363                 if (likely(!(status & tx_not_ls))) {
1364                         /* ... verify the status error condition */
1365                         if (unlikely(status & tx_err)) {
1366                                 priv->dev->stats.tx_errors++;
1367                         } else {
1368                                 priv->dev->stats.tx_packets++;
1369                                 priv->xstats.tx_pkt_n++;
1370                         }
1371                         stmmac_get_tx_hwtstamp(priv, p, skb);
1372                 }
1373
1374                 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1375                         if (priv->tx_skbuff_dma[entry].map_as_page)
1376                                 dma_unmap_page(priv->device,
1377                                                priv->tx_skbuff_dma[entry].buf,
1378                                                priv->tx_skbuff_dma[entry].len,
1379                                                DMA_TO_DEVICE);
1380                         else
1381                                 dma_unmap_single(priv->device,
1382                                                  priv->tx_skbuff_dma[entry].buf,
1383                                                  priv->tx_skbuff_dma[entry].len,
1384                                                  DMA_TO_DEVICE);
1385                         priv->tx_skbuff_dma[entry].buf = 0;
1386                         priv->tx_skbuff_dma[entry].len = 0;
1387                         priv->tx_skbuff_dma[entry].map_as_page = false;
1388                 }
1389
1390                 if (priv->hw->mode->clean_desc3)
1391                         priv->hw->mode->clean_desc3(priv, p);
1392
1393                 priv->tx_skbuff_dma[entry].last_segment = false;
1394                 priv->tx_skbuff_dma[entry].is_jumbo = false;
1395
1396                 if (likely(skb != NULL)) {
1397                         pkts_compl++;
1398                         bytes_compl += skb->len;
1399                         dev_consume_skb_any(skb);
1400                         priv->tx_skbuff[entry] = NULL;
1401                 }
1402
1403                 priv->hw->desc->release_tx_desc(p, priv->mode);
1404
1405                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1406         }
1407         priv->dirty_tx = entry;
1408
1409         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1410
1411         if (unlikely(netif_queue_stopped(priv->dev) &&
1412             stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1413                 netif_dbg(priv, tx_done, priv->dev,
1414                           "%s: restart transmit\n", __func__);
1415                 netif_wake_queue(priv->dev);
1416         }
1417
1418         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1419                 stmmac_enable_eee_mode(priv);
1420                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1421         }
1422         netif_tx_unlock(priv->dev);
1423 }
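
/* Editor's sketch (illustrative, not part of the driver): the ring walk in
 * stmmac_tx_clean() above advances 'entry' with STMMAC_GET_ENTRY(). Assuming
 * the ring sizes (DMA_TX_SIZE / DMA_RX_SIZE) are powers of two, that helper
 * reduces to a masked wrap-around increment such as:
 */
static inline unsigned int example_next_ring_entry(unsigned int cur,
                                                   unsigned int size)
{
        /* e.g. cur = 511, size = 512: (511 + 1) & 511 == 0, i.e. wrap */
        return (cur + 1) & (size - 1);
}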
1424
1425 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1426 {
1427         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1428 }
1429
1430 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1431 {
1432         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1433 }
1434
1435 /**
1436  * stmmac_tx_err - to manage the tx error
1437  * @priv: driver private structure
1438  * Description: it cleans the descriptors and restarts the transmission
1439  * in case of transmission errors.
1440  */
1441 static void stmmac_tx_err(struct stmmac_priv *priv)
1442 {
1443         int i;
1444         netif_stop_queue(priv->dev);
1445
1446         priv->hw->dma->stop_tx(priv->ioaddr);
1447         dma_free_tx_skbufs(priv);
1448         for (i = 0; i < DMA_TX_SIZE; i++)
1449                 if (priv->extend_desc)
1450                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1451                                                      priv->mode,
1452                                                      (i == DMA_TX_SIZE - 1));
1453                 else
1454                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1455                                                      priv->mode,
1456                                                      (i == DMA_TX_SIZE - 1));
1457         priv->dirty_tx = 0;
1458         priv->cur_tx = 0;
1459         netdev_reset_queue(priv->dev);
1460         priv->hw->dma->start_tx(priv->ioaddr);
1461
1462         priv->dev->stats.tx_errors++;
1463         netif_wake_queue(priv->dev);
1464 }
1465
1466 /**
1467  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1468  *  @priv: driver private structure
1469  *  @txmode: TX operating mode
1470  *  @rxmode: RX operating mode
1471  *  @chan: channel index
1472  *  Description: it is used for configuring the DMA operation mode at
1473  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1474  *  mode.
1475  */
1476 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1477                                           u32 rxmode, u32 chan)
1478 {
1479         int rxfifosz = priv->plat->rx_fifo_size;
1480
1481         if (rxfifosz == 0)
1482                 rxfifosz = priv->dma_cap.rx_fifo_size;
1483
1484         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1485                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1486                                            rxfifosz);
1487                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1488         } else {
1489                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1490                                         rxfifosz);
1491         }
1492 }
1493
1494 /**
1495  * stmmac_dma_interrupt - DMA ISR
1496  * @priv: driver private structure
1497  * Description: this is the DMA ISR. It is called by the main ISR.
1498  * It calls the dwmac dma routine and schedules the poll method when there
1499  * is work to be done.
1500  */
1501 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1502 {
1503         u32 chan = STMMAC_CHAN0;
1504         int status;
1505
1506         status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1507         if (likely((status & handle_rx)) || (status & handle_tx)) {
1508                 if (likely(napi_schedule_prep(&priv->napi))) {
1509                         stmmac_disable_dma_irq(priv, chan);
1510                         __napi_schedule(&priv->napi);
1511                 }
1512         }
1513         if (unlikely(status & tx_hard_error_bump_tc)) {
1514                 /* Try to bump up the dma threshold on this failure */
1515                 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1516                     (tc <= 256)) {
1517                         tc += 64;
1518                         if (priv->plat->force_thresh_dma_mode)
1519                                 stmmac_set_dma_operation_mode(priv, tc, tc,
1520                                                               chan);
1521                         else
1522                                 stmmac_set_dma_operation_mode(priv, tc,
1523                                                               SF_DMA_MODE, chan);
1524
1525                         priv->xstats.threshold = tc;
1526                 }
1527         } else if (unlikely(status == tx_hard_error))
1528                 stmmac_tx_err(priv);
1529 }
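
/* Editor's note (worked example of the policy above, assuming the module
 * defaults): tc starts at TC_DEFAULT (64) and each tx_hard_error_bump_tc
 * event raises it by 64 while the (tc <= 256) guard holds, i.e.
 * 64 -> 128 -> 192 -> 256 -> 320, after which no further bump happens.
 * A minimal model of that escalation:
 */
static inline int example_bump_threshold(int tc_cur)
{
        /* mirror the guard in stmmac_dma_interrupt() */
        return (tc_cur <= 256) ? tc_cur + 64 : tc_cur;
}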
1530
1531 /**
1532  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
1533  * @priv: driver private structure
1534  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
1535  */
1536 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1537 {
1538         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1539                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1540
1541         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1542                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1543                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1544         } else {
1545                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1546                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1547         }
1548
1549         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1550
1551         if (priv->dma_cap.rmon) {
1552                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1553                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1554         } else
1555                 netdev_info(priv->dev, "No MAC Management Counters available\n");
1556 }
1557
1558 /**
1559  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
1560  * @priv: driver private structure
1561  * Description: select the Enhanced/Alternate or Normal descriptors.
1562  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1563  * supported by the HW capability register.
1564  */
1565 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1566 {
1567         if (priv->plat->enh_desc) {
1568                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1569
1570                 /* GMAC older than 3.50 has no extended descriptors */
1571                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1572                         dev_info(priv->device, "Enabled extended descriptors\n");
1573                         priv->extend_desc = 1;
1574                 } else
1575                         dev_warn(priv->device, "Extended descriptors not supported\n");
1576
1577                 priv->hw->desc = &enh_desc_ops;
1578         } else {
1579                 dev_info(priv->device, "Normal descriptors\n");
1580                 priv->hw->desc = &ndesc_ops;
1581         }
1582 }
1583
1584 /**
1585  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1586  * @priv: driver private structure
1587  * Description:
1588  *  new GMAC chip generations have a new register to indicate the
1589  *  presence of the optional feature/functions.
1590  *  It can also be used to override the values passed through the
1591  *  platform, which is necessary for old MAC10/100 and GMAC chips.
1592  */
1593 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1594 {
1595         u32 ret = 0;
1596
1597         if (priv->hw->dma->get_hw_feature) {
1598                 priv->hw->dma->get_hw_feature(priv->ioaddr,
1599                                               &priv->dma_cap);
1600                 ret = 1;
1601         }
1602
1603         return ret;
1604 }
1605
1606 /**
1607  * stmmac_check_ether_addr - check if the MAC addr is valid
1608  * @priv: driver private structure
1609  * Description:
1610  * it verifies that the MAC address is valid; in case of failure it
1611  * generates a random MAC address.
1612  */
1613 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1614 {
1615         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1616                 priv->hw->mac->get_umac_addr(priv->hw,
1617                                              priv->dev->dev_addr, 0);
1618                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1619                         eth_hw_addr_random(priv->dev);
1620                 netdev_info(priv->dev, "device MAC address %pM\n",
1621                             priv->dev->dev_addr);
1622         }
1623 }
1624
1625 /**
1626  * stmmac_init_dma_engine - DMA init.
1627  * @priv: driver private structure
1628  * Description:
1629  * It inits the DMA invoking the specific MAC/GMAC callback.
1630  * Some DMA parameters can be passed from the platform;
1631  * if they are not passed, a default is kept for the MAC or GMAC.
1632  */
1633 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1634 {
1635         int atds = 0;
1636         int ret = 0;
1637
1638         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1639                 dev_err(priv->device, "Invalid DMA configuration\n");
1640                 return -EINVAL;
1641         }
1642
1643         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1644                 atds = 1;
1645
1646         ret = priv->hw->dma->reset(priv->ioaddr);
1647         if (ret) {
1648                 dev_err(priv->device, "Failed to reset the dma\n");
1649                 return ret;
1650         }
1651
1652         priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1653                             priv->dma_tx_phy, priv->dma_rx_phy, atds);
1654
1655         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1656                 priv->rx_tail_addr = priv->dma_rx_phy +
1657                             (DMA_RX_SIZE * sizeof(struct dma_desc));
1658                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1659                                                STMMAC_CHAN0);
1660
1661                 priv->tx_tail_addr = priv->dma_tx_phy +
1662                             (DMA_TX_SIZE * sizeof(struct dma_desc));
1663                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1664                                                STMMAC_CHAN0);
1665         }
1666
1667         if (priv->plat->axi && priv->hw->dma->axi)
1668                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1669
1670         return ret;
1671 }
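
/* Editor's note (illustrative arithmetic, assuming 16-byte normal
 * descriptors, i.e. four 32-bit words des0..des3, and a 512-entry ring):
 * the tail pointers programmed above point one descriptor past the end of
 * the ring, e.g. rx_tail_addr = dma_rx_phy + 512 * 16 = dma_rx_phy + 8192.
 */
static inline dma_addr_t example_ring_tail(dma_addr_t base, unsigned int len,
                                           size_t desc_sz)
{
        /* tail = base + len * desc_sz, matching the computation above */
        return base + (dma_addr_t)len * desc_sz;
}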
1672
1673 /**
1674  * stmmac_tx_timer - mitigation sw timer for tx.
1675  * @data: data pointer
1676  * Description:
1677  * This is the timer handler that directly invokes stmmac_tx_clean.
1678  */
1679 static void stmmac_tx_timer(unsigned long data)
1680 {
1681         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1682
1683         stmmac_tx_clean(priv);
1684 }
1685
1686 /**
1687  * stmmac_init_tx_coalesce - init tx mitigation options.
1688  * @priv: driver private structure
1689  * Description:
1690  * This inits the transmit coalesce parameters: i.e. timer rate,
1691  * timer handler and default threshold used for enabling the
1692  * interrupt on completion bit.
1693  */
1694 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1695 {
1696         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1697         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1698         init_timer(&priv->txtimer);
1699         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1700         priv->txtimer.data = (unsigned long)priv;
1701         priv->txtimer.function = stmmac_tx_timer;
1702         add_timer(&priv->txtimer);
1703 }
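
/* Editor's sketch (assumption: on kernels providing timer_setup(), v4.15+,
 * the open-coded init_timer()/.data/.function sequence above would instead
 * be written as below; shown in a comment since this tree predates that API):
 *
 *      static void stmmac_tx_timer(struct timer_list *t)
 *      {
 *              struct stmmac_priv *priv = from_timer(priv, t, txtimer);
 *
 *              stmmac_tx_clean(priv);
 *      }
 *
 *      timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
 *      mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
 */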
1704
1705 /**
1706  *  stmmac_set_tx_queue_weight - Set TX queue weight
1707  *  @priv: driver private structure
1708  *  Description: It is used for setting the TX queue weights
1709  */
1710 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
1711 {
1712         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1713         u32 weight;
1714         u32 queue;
1715
1716         for (queue = 0; queue < tx_queues_count; queue++) {
1717                 weight = priv->plat->tx_queues_cfg[queue].weight;
1718                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
1719         }
1720 }
1721
1722 /**
1723  *  stmmac_configure_cbs - Configure CBS in TX queue
1724  *  @priv: driver private structure
1725  *  Description: It is used for configuring CBS in AVB TX queues
1726  */
1727 static void stmmac_configure_cbs(struct stmmac_priv *priv)
1728 {
1729         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1730         u32 mode_to_use;
1731         u32 queue;
1732
1733         for (queue = 0; queue < tx_queues_count; queue++) {
1734                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
1735                 if (mode_to_use == MTL_QUEUE_DCB)
1736                         continue;
1737
1738                 priv->hw->mac->config_cbs(priv->hw,
1739                                 priv->plat->tx_queues_cfg[queue].send_slope,
1740                                 priv->plat->tx_queues_cfg[queue].idle_slope,
1741                                 priv->plat->tx_queues_cfg[queue].high_credit,
1742                                 priv->plat->tx_queues_cfg[queue].low_credit,
1743                                 queue);
1744         }
1745 }
1746
1747 /**
1748  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
1749  *  @priv: driver private structure
1750  *  Description: It is used for mapping RX queues to RX dma channels
1751  */
1752 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
1753 {
1754         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1755         u32 queue;
1756         u32 chan;
1757
1758         for (queue = 0; queue < rx_queues_count; queue++) {
1759                 chan = priv->plat->rx_queues_cfg[queue].chan;
1760                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
1761         }
1762 }
1763
1764 /**
1765  *  stmmac_mtl_configuration - Configure MTL
1766  *  @priv: driver private structure
1767  *  Description: It is used for configuring the MTL
1768  */
1769 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
1770 {
1771         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1772         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1773
1774         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
1775                 stmmac_set_tx_queue_weight(priv);
1776
1777         /* Configure MTL RX algorithms */
1778         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
1779                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
1780                                                 priv->plat->rx_sched_algorithm);
1781
1782         /* Configure MTL TX algorithms */
1783         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
1784                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
1785                                                 priv->plat->tx_sched_algorithm);
1786
1787         /* Configure CBS in AVB TX queues */
1788         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
1789                 stmmac_configure_cbs(priv);
1790
1791         /* Map RX MTL to DMA channels */
1792         if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
1793                 stmmac_rx_queue_dma_chan_map(priv);
1794
1795         /* Enable MAC RX Queues */
1796         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
1797                 stmmac_mac_enable_rx_queues(priv);
1798
1799         /* Set the HW DMA mode and the COE */
1800         stmmac_dma_operation_mode(priv);
1801 }
1802
1803 /**
1804  * stmmac_hw_setup - setup mac in a usable state.
1805  *  @dev : pointer to the device structure.
1806  *  Description:
1807  *  this is the main function to set up the HW in a usable state: the
1808  *  dma engine is reset, the core registers are configured (e.g. AXI,
1809  *  Checksum features, timers) and the DMA is made ready to start
1810  *  receiving and transmitting.
1811  *  Return value:
1812  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1813  *  file on failure.
1814  */
1815 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1816 {
1817         struct stmmac_priv *priv = netdev_priv(dev);
1818         int ret;
1819
1820         /* DMA initialization and SW reset */
1821         ret = stmmac_init_dma_engine(priv);
1822         if (ret < 0) {
1823                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1824                            __func__);
1825                 return ret;
1826         }
1827
1828         /* Copy the MAC addr into the HW  */
1829         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1830
1831         /* PS and related bits will be programmed according to the speed */
1832         if (priv->hw->pcs) {
1833                 int speed = priv->plat->mac_port_sel_speed;
1834
1835                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1836                     (speed == SPEED_1000)) {
1837                         priv->hw->ps = speed;
1838                 } else {
1839                         dev_warn(priv->device, "invalid port speed\n");
1840                         priv->hw->ps = 0;
1841                 }
1842         }
1843
1844         /* Initialize the MAC Core */
1845         priv->hw->mac->core_init(priv->hw, dev->mtu);
1846
1847         /* Initialize MTL */
1848         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1849                 stmmac_mtl_configuration(priv);
1850
1851         ret = priv->hw->mac->rx_ipc(priv->hw);
1852         if (!ret) {
1853                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
1854                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1855                 priv->hw->rx_csum = 0;
1856         }
1857
1858         /* Enable the MAC Rx/Tx */
1859         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1860                 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1861         else
1862                 stmmac_set_mac(priv->ioaddr, true);
1863
1864         stmmac_mmc_setup(priv);
1865
1866         if (init_ptp) {
1867                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
1868                 if (ret < 0)
1869                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
1870
1871                 ret = stmmac_init_ptp(priv);
1872                 if (ret == -EOPNOTSUPP)
1873                         netdev_warn(priv->dev, "PTP not supported by HW\n");
1874                 else if (ret)
1875                         netdev_warn(priv->dev, "PTP init failed\n");
1876         }
1877
1878 #ifdef CONFIG_DEBUG_FS
1879         ret = stmmac_init_fs(dev);
1880         if (ret < 0)
1881                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1882                             __func__);
1883 #endif
1884         /* Start the ball rolling... */
1885         netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
1886         priv->hw->dma->start_tx(priv->ioaddr);
1887         priv->hw->dma->start_rx(priv->ioaddr);
1888
1889         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1890
1891         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1892                 priv->rx_riwt = MAX_DMA_RIWT;
1893                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1894         }
1895
1896         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1897                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1898
1899         /*  set TX ring length */
1900         if (priv->hw->dma->set_tx_ring_len)
1901                 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1902                                                (DMA_TX_SIZE - 1));
1903         /*  set RX ring length */
1904         if (priv->hw->dma->set_rx_ring_len)
1905                 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1906                                                (DMA_RX_SIZE - 1));
1907         /* Enable TSO */
1908         if (priv->tso)
1909                 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1910
1911         return 0;
1912 }
1913
1914 static void stmmac_hw_teardown(struct net_device *dev)
1915 {
1916         struct stmmac_priv *priv = netdev_priv(dev);
1917
1918         clk_disable_unprepare(priv->plat->clk_ptp_ref);
1919 }
1920
1921 /**
1922  *  stmmac_open - open entry point of the driver
1923  *  @dev : pointer to the device structure.
1924  *  Description:
1925  *  This function is the open entry point of the driver.
1926  *  Return value:
1927  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1928  *  file on failure.
1929  */
1930 static int stmmac_open(struct net_device *dev)
1931 {
1932         struct stmmac_priv *priv = netdev_priv(dev);
1933         int ret;
1934
1935         stmmac_check_ether_addr(priv);
1936
1937         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1938             priv->hw->pcs != STMMAC_PCS_TBI &&
1939             priv->hw->pcs != STMMAC_PCS_RTBI) {
1940                 ret = stmmac_init_phy(dev);
1941                 if (ret) {
1942                         netdev_err(priv->dev,
1943                                    "%s: Cannot attach to PHY (error: %d)\n",
1944                                    __func__, ret);
1945                         return ret;
1946                 }
1947         }
1948
1949         /* Extra statistics */
1950         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1951         priv->xstats.threshold = tc;
1952
1953         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1954         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1955
1956         ret = alloc_dma_desc_resources(priv);
1957         if (ret < 0) {
1958                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1959                            __func__);
1960                 goto dma_desc_error;
1961         }
1962
1963         ret = init_dma_desc_rings(dev, GFP_KERNEL);
1964         if (ret < 0) {
1965                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1966                            __func__);
1967                 goto init_error;
1968         }
1969
1970         ret = stmmac_hw_setup(dev, true);
1971         if (ret < 0) {
1972                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
1973                 goto init_error;
1974         }
1975
1976         stmmac_init_tx_coalesce(priv);
1977
1978         if (dev->phydev)
1979                 phy_start(dev->phydev);
1980
1981         /* Request the IRQ lines */
1982         ret = request_irq(dev->irq, stmmac_interrupt,
1983                           IRQF_SHARED, dev->name, dev);
1984         if (unlikely(ret < 0)) {
1985                 netdev_err(priv->dev,
1986                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1987                            __func__, dev->irq, ret);
1988                 goto irq_error;
1989         }
1990
1991         /* Request the Wake IRQ in case another line is used for WoL */
1992         if (priv->wol_irq != dev->irq) {
1993                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1994                                   IRQF_SHARED, dev->name, dev);
1995                 if (unlikely(ret < 0)) {
1996                         netdev_err(priv->dev,
1997                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1998                                    __func__, priv->wol_irq, ret);
1999                         goto wolirq_error;
2000                 }
2001         }
2002
2003         /* Request the LPI IRQ in case another line is used for LPI */
2004         if (priv->lpi_irq > 0) {
2005                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2006                                   dev->name, dev);
2007                 if (unlikely(ret < 0)) {
2008                         netdev_err(priv->dev,
2009                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2010                                    __func__, priv->lpi_irq, ret);
2011                         goto lpiirq_error;
2012                 }
2013         }
2014
2015         napi_enable(&priv->napi);
2016         netif_start_queue(dev);
2017
2018         return 0;
2019
2020 lpiirq_error:
2021         if (priv->wol_irq != dev->irq)
2022                 free_irq(priv->wol_irq, dev);
2023 wolirq_error:
2024         free_irq(dev->irq, dev);
2025 irq_error:
2026         if (dev->phydev)
2027                 phy_stop(dev->phydev);
2028
2029         del_timer_sync(&priv->txtimer);
2030         stmmac_hw_teardown(dev);
2031 init_error:
2032         free_dma_desc_resources(priv);
2033 dma_desc_error:
2034         if (dev->phydev)
2035                 phy_disconnect(dev->phydev);
2036
2037         return ret;
2038 }
2039
2040 /**
2041  *  stmmac_release - close entry point of the driver
2042  *  @dev : device pointer.
2043  *  Description:
2044  *  This is the stop entry point of the driver.
2045  */
2046 static int stmmac_release(struct net_device *dev)
2047 {
2048         struct stmmac_priv *priv = netdev_priv(dev);
2049
2050         if (priv->eee_enabled)
2051                 del_timer_sync(&priv->eee_ctrl_timer);
2052
2053         /* Stop and disconnect the PHY */
2054         if (dev->phydev) {
2055                 phy_stop(dev->phydev);
2056                 phy_disconnect(dev->phydev);
2057         }
2058
2059         netif_stop_queue(dev);
2060
2061         napi_disable(&priv->napi);
2062
2063         del_timer_sync(&priv->txtimer);
2064
2065         /* Free the IRQ lines */
2066         free_irq(dev->irq, dev);
2067         if (priv->wol_irq != dev->irq)
2068                 free_irq(priv->wol_irq, dev);
2069         if (priv->lpi_irq > 0)
2070                 free_irq(priv->lpi_irq, dev);
2071
2072         /* Stop TX/RX DMA and clear the descriptors */
2073         priv->hw->dma->stop_tx(priv->ioaddr);
2074         priv->hw->dma->stop_rx(priv->ioaddr);
2075
2076         /* Release and free the Rx/Tx resources */
2077         free_dma_desc_resources(priv);
2078
2079         /* Disable the MAC Rx/Tx */
2080         stmmac_set_mac(priv->ioaddr, false);
2081
2082         netif_carrier_off(dev);
2083
2084 #ifdef CONFIG_DEBUG_FS
2085         stmmac_exit_fs(dev);
2086 #endif
2087
2088         stmmac_release_ptp(priv);
2089
2090         return 0;
2091 }
2092
2093 /**
2094  *  stmmac_tso_allocator - allocate and fill TSO descriptors
2095  *  @priv: driver private structure
2096  *  @des: buffer start address
2097  *  @total_len: total length to fill in descriptors
2098  *  @last_segment: condition for the last descriptor
2099  *  Description:
2100  *  This function fills the descriptors and requests new descriptors
2101  *  according to the remaining buffer length to fill.
2102  */
2103 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2104                                  int total_len, bool last_segment)
2105 {
2106         struct dma_desc *desc;
2107         int tmp_len;
2108         u32 buff_size;
2109
2110         tmp_len = total_len;
2111
2112         while (tmp_len > 0) {
2113                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2114                 desc = priv->dma_tx + priv->cur_tx;
2115
2116                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2117                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2118                             TSO_MAX_BUFF_SIZE : tmp_len;
2119
2120                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2121                         0, 1,
2122                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2123                         0, 0);
2124
2125                 tmp_len -= TSO_MAX_BUFF_SIZE;
2126         }
2127 }
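
/* Editor's worked example (illustrative): with TSO_MAX_BUFF_SIZE = 16383
 * (SZ_16K - 1), a remaining payload of total_len = 40000 bytes is split by
 * stmmac_tso_allocator() into three descriptors of 16383, 16383 and 7234
 * bytes; only the last one may carry the Last Segment bit.
 */
static inline unsigned int example_tso_desc_count(unsigned int total_len,
                                                  unsigned int max_buff)
{
        /* ceiling division: descriptors needed for the remaining payload */
        return (total_len + max_buff - 1) / max_buff;
}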
2128
2129 /**
2130  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2131  *  @skb : the socket buffer
2132  *  @dev : device pointer
2133  *  Description: this is the transmit function that is called on TSO frames
2134  *  (support available on GMAC4 and newer chips).
2135  *  The diagram below shows the ring programming in the case of TSO frames:
2136  *
2137  *  First Descriptor
2138  *   --------
2139  *   | DES0 |---> buffer1 = L2/L3/L4 header
2140  *   | DES1 |---> TCP Payload (can continue on next descr...)
2141  *   | DES2 |---> buffer 1 and 2 len
2142  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2143  *   --------
2144  *      |
2145  *     ...
2146  *      |
2147  *   --------
2148  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2149  *   | DES1 | --|
2150  *   | DES2 | --> buffer 1 and 2 len
2151  *   | DES3 |
2152  *   --------
2153  *
2154  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when the MSS value changes.
2155  */
2156 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2157 {
2158         u32 pay_len, mss;
2159         int tmp_pay_len = 0;
2160         struct stmmac_priv *priv = netdev_priv(dev);
2161         int nfrags = skb_shinfo(skb)->nr_frags;
2162         unsigned int first_entry, des;
2163         struct dma_desc *desc, *first, *mss_desc = NULL;
2164         u8 proto_hdr_len;
2165         int i;
2166
2167         /* Compute header lengths */
2168         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2169
2170         /* Desc availability check based on the threshold should be safe enough */
2171         if (unlikely(stmmac_tx_avail(priv) <
2172                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2173                 if (!netif_queue_stopped(dev)) {
2174                         netif_stop_queue(dev);
2175                         /* This is a hard error, log it. */
2176                         netdev_err(priv->dev,
2177                                    "%s: Tx Ring full when queue awake\n",
2178                                    __func__);
2179                 }
2180                 return NETDEV_TX_BUSY;
2181         }
2182
2183         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2184
2185         mss = skb_shinfo(skb)->gso_size;
2186
2187         /* set new MSS value if needed */
2188         if (mss != priv->mss) {
2189                 mss_desc = priv->dma_tx + priv->cur_tx;
2190                 priv->hw->desc->set_mss(mss_desc, mss);
2191                 priv->mss = mss;
2192                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2193         }
2194
2195         if (netif_msg_tx_queued(priv)) {
2196                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2197                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2198                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2199                         skb->data_len);
2200         }
2201
2202         first_entry = priv->cur_tx;
2203
2204         desc = priv->dma_tx + first_entry;
2205         first = desc;
2206
2207         /* first descriptor: fill Headers on Buf1 */
2208         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2209                              DMA_TO_DEVICE);
2210         if (dma_mapping_error(priv->device, des))
2211                 goto dma_map_err;
2212
2213         priv->tx_skbuff_dma[first_entry].buf = des;
2214         priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2215         priv->tx_skbuff[first_entry] = skb;
2216
2217         first->des0 = cpu_to_le32(des);
2218
2219         /* Fill start of payload in buff2 of first descriptor */
2220         if (pay_len)
2221                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2222
2223         /* If needed take extra descriptors to fill the remaining payload */
2224         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2225
2226         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2227
2228         /* Prepare fragments */
2229         for (i = 0; i < nfrags; i++) {
2230                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2231
2232                 des = skb_frag_dma_map(priv->device, frag, 0,
2233                                        skb_frag_size(frag),
2234                                        DMA_TO_DEVICE);
2235                 if (dma_mapping_error(priv->device, des))
2236                         goto dma_map_err;
2237
2238                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2239                                      (i == nfrags - 1));
2240
2241                 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2242                 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2243                 priv->tx_skbuff[priv->cur_tx] = NULL;
2244                 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2245         }
2246
2247         priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2248
2249         priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2250
2251         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2252                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2253                           __func__);
2254                 netif_stop_queue(dev);
2255         }
2256
2257         dev->stats.tx_bytes += skb->len;
2258         priv->xstats.tx_tso_frames++;
2259         priv->xstats.tx_tso_nfrags += nfrags;
2260
2261         /* Manage tx mitigation */
2262         priv->tx_count_frames += nfrags + 1;
2263         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2264                 mod_timer(&priv->txtimer,
2265                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2266         } else {
2267                 priv->tx_count_frames = 0;
2268                 priv->hw->desc->set_tx_ic(desc);
2269                 priv->xstats.tx_set_ic_bit++;
2270         }
2271
2272         if (!priv->hwts_tx_en)
2273                 skb_tx_timestamp(skb);
2274
2275         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2276                      priv->hwts_tx_en)) {
2277                 /* declare that device is doing timestamping */
2278                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2279                 priv->hw->desc->enable_tx_timestamp(first);
2280         }
2281
2282         /* Complete the first descriptor before granting the DMA */
2283         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2284                         proto_hdr_len,
2285                         pay_len,
2286                         1, priv->tx_skbuff_dma[first_entry].last_segment,
2287                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2288
2289         /* If context desc is used to change MSS */
2290         if (mss_desc)
2291                 priv->hw->desc->set_tx_owner(mss_desc);
2292
2293         /* The own bit must be the latest setting done when preparing the
2294          * descriptor, and a barrier is then needed to make sure that
2295          * all is coherent before granting the DMA engine.
2296          */
2297         dma_wmb();
2298
2299         if (netif_msg_pktdata(priv)) {
2300                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2301                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2302                         priv->cur_tx, first, nfrags);
2303
2304                 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2305                                              0);
2306
2307                 pr_info(">>> frame to be transmitted: ");
2308                 print_pkt(skb->data, skb_headlen(skb));
2309         }
2310
2311         netdev_sent_queue(dev, skb->len);
2312
2313         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2314                                        STMMAC_CHAN0);
2315
2316         return NETDEV_TX_OK;
2317
2318 dma_map_err:
2319         dev_err(priv->device, "Tx dma map failed\n");
2320         dev_kfree_skb(skb);
2321         priv->dev->stats.tx_dropped++;
2322         return NETDEV_TX_OK;
2323 }
2324
2325 /**
2326  *  stmmac_xmit - Tx entry point of the driver
2327  *  @skb : the socket buffer
2328  *  @dev : device pointer
2329  *  Description : this is the tx entry point of the driver.
2330  *  It programs the chain or the ring and supports oversized frames
2331  *  and SG feature.
2332  */
2333 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2334 {
2335         struct stmmac_priv *priv = netdev_priv(dev);
2336         unsigned int nopaged_len = skb_headlen(skb);
2337         int i, csum_insertion = 0, is_jumbo = 0;
2338         int nfrags = skb_shinfo(skb)->nr_frags;
2339         unsigned int entry, first_entry;
2340         struct dma_desc *desc, *first;
2341         unsigned int enh_desc;
2342         unsigned int des;
2343
2344         /* Manage oversized TCP frames for GMAC4 device */
2345         if (skb_is_gso(skb) && priv->tso) {
2346                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2347                         return stmmac_tso_xmit(skb, dev);
2348         }
2349
2350         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2351                 if (!netif_queue_stopped(dev)) {
2352                         netif_stop_queue(dev);
2353                         /* This is a hard error, log it. */
2354                         netdev_err(priv->dev,
2355                                    "%s: Tx Ring full when queue awake\n",
2356                                    __func__);
2357                 }
2358                 return NETDEV_TX_BUSY;
2359         }
2360
2361         if (priv->tx_path_in_lpi_mode)
2362                 stmmac_disable_eee_mode(priv);
2363
2364         entry = priv->cur_tx;
2365         first_entry = entry;
2366
2367         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2368
2369         if (likely(priv->extend_desc))
2370                 desc = (struct dma_desc *)(priv->dma_etx + entry);
2371         else
2372                 desc = priv->dma_tx + entry;
2373
2374         first = desc;
2375
2376         priv->tx_skbuff[first_entry] = skb;
2377
2378         enh_desc = priv->plat->enh_desc;
2379         /* To program the descriptors according to the size of the frame */
2380         if (enh_desc)
2381                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2382
2383         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2384                                          DWMAC_CORE_4_00)) {
2385                 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2386                 if (unlikely(entry < 0))
2387                         goto dma_map_err;
2388         }
2389
2390         for (i = 0; i < nfrags; i++) {
2391                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2392                 int len = skb_frag_size(frag);
2393                 bool last_segment = (i == (nfrags - 1));
2394
2395                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2396
2397                 if (likely(priv->extend_desc))
2398                         desc = (struct dma_desc *)(priv->dma_etx + entry);
2399                 else
2400                         desc = priv->dma_tx + entry;
2401
2402                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2403                                        DMA_TO_DEVICE);
2404                 if (dma_mapping_error(priv->device, des))
2405                         goto dma_map_err; /* should reuse desc w/o issues */
2406
2407                 priv->tx_skbuff[entry] = NULL;
2408
2409                 priv->tx_skbuff_dma[entry].buf = des;
2410                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2411                         desc->des0 = cpu_to_le32(des);
2412                 else
2413                         desc->des2 = cpu_to_le32(des);
2414
2415                 priv->tx_skbuff_dma[entry].map_as_page = true;
2416                 priv->tx_skbuff_dma[entry].len = len;
2417                 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2418
2419                 /* Prepare the descriptor and set the own bit too */
2420                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2421                                                 priv->mode, 1, last_segment);
2422         }
2423
2424         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2425
2426         priv->cur_tx = entry;
2427
2428         if (netif_msg_pktdata(priv)) {
2429                 void *tx_head;
2430
2431                 netdev_dbg(priv->dev,
2432                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2433                            __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2434                            entry, first, nfrags);
2435
2436                 if (priv->extend_desc)
2437                         tx_head = (void *)priv->dma_etx;
2438                 else
2439                         tx_head = (void *)priv->dma_tx;
2440
2441                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2442
2443                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2444                 print_pkt(skb->data, skb->len);
2445         }
2446
2447         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2448                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2449                           __func__);
2450                 netif_stop_queue(dev);
2451         }
2452
2453         dev->stats.tx_bytes += skb->len;
2454
2455         /* According to the coalesce parameter the IC bit for the latest
2456          * segment is reset and the timer re-started to clean the tx status.
2457          * This approach takes care of the fragments: desc is the first
2458          * element in case of no SG.
2459          */
2460         priv->tx_count_frames += nfrags + 1;
2461         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2462                 mod_timer(&priv->txtimer,
2463                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2464         } else {
2465                 priv->tx_count_frames = 0;
2466                 priv->hw->desc->set_tx_ic(desc);
2467                 priv->xstats.tx_set_ic_bit++;
2468         }
2469
2470         if (!priv->hwts_tx_en)
2471                 skb_tx_timestamp(skb);
2472
2473         /* Ready to fill the first descriptor and set the OWN bit w/o any
2474          * problems because all the descriptors are actually ready to be
2475          * passed to the DMA engine.
2476          */
2477         if (likely(!is_jumbo)) {
2478                 bool last_segment = (nfrags == 0);
2479
2480                 des = dma_map_single(priv->device, skb->data,
2481                                      nopaged_len, DMA_TO_DEVICE);
2482                 if (dma_mapping_error(priv->device, des))
2483                         goto dma_map_err;
2484
2485                 priv->tx_skbuff_dma[first_entry].buf = des;
2486                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2487                         first->des0 = cpu_to_le32(des);
2488                 else
2489                         first->des2 = cpu_to_le32(des);
2490
2491                 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2492                 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2493
2494                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2495                              priv->hwts_tx_en)) {
2496                         /* declare that device is doing timestamping */
2497                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2498                         priv->hw->desc->enable_tx_timestamp(first);
2499                 }
2500
2501                 /* Prepare the first descriptor setting the OWN bit too */
2502                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2503                                                 csum_insertion, priv->mode, 1,
2504                                                 last_segment);
2505
2506                 /* The own bit must be the latest setting done when preparing the
2507                  * descriptor, and a barrier is then needed to make sure that
2508                  * all is coherent before granting the DMA engine.
2509                  */
2510                 dma_wmb();
2511         }
2512
2513         netdev_sent_queue(dev, skb->len);
2514
2515         if (priv->synopsys_id < DWMAC_CORE_4_00)
2516                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2517         else
2518                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2519                                                STMMAC_CHAN0);
2520
2521         return NETDEV_TX_OK;
2522
2523 dma_map_err:
2524         netdev_err(priv->dev, "Tx DMA map failed\n");
2525         dev_kfree_skb(skb);
2526         priv->dev->stats.tx_dropped++;
2527         return NETDEV_TX_OK;
2528 }
2529
2530 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2531 {
2532         struct ethhdr *ehdr;
2533         u16 vlanid;
2534
2535         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2536             NETIF_F_HW_VLAN_CTAG_RX &&
2537             !__vlan_get_tag(skb, &vlanid)) {
2538                 /* pop the vlan tag */
2539                 ehdr = (struct ethhdr *)skb->data;
2540                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2541                 skb_pull(skb, VLAN_HLEN);
2542                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2543         }
2544 }
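
/* Editor's note (illustrative layout, standard 802.1Q framing): the memmove
 * above shifts the 12 bytes of dst/src MAC addresses forward over the 4-byte
 * VLAN tag, turning
 *      | dst MAC (6) | src MAC (6) | TPID+TCI (4) | type | payload |
 * into
 *      | dst MAC (6) | src MAC (6) | type | payload |
 * before __vlan_hwaccel_put_tag() hands the extracted VLAN id to the stack.
 */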
2545
2547 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2548 {
2549         if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2550                 return 0;
2551
2552         return 1;
2553 }
2554
2555 /**
2556  * stmmac_rx_refill - refill used skb preallocated buffers
2557  * @priv: driver private structure
2558  * Description: this reallocates the skbs for the reception process,
2559  * which is based on zero-copy.
2560  */
2561 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2562 {
2563         int bfsize = priv->dma_buf_sz;
2564         unsigned int entry = priv->dirty_rx;
2565         int dirty = stmmac_rx_dirty(priv);
2566
2567         while (dirty-- > 0) {
2568                 struct dma_desc *p;
2569
2570                 if (priv->extend_desc)
2571                         p = (struct dma_desc *)(priv->dma_erx + entry);
2572                 else
2573                         p = priv->dma_rx + entry;
2574
2575                 if (likely(priv->rx_skbuff[entry] == NULL)) {
2576                         struct sk_buff *skb;
2577
2578                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2579                         if (unlikely(!skb)) {
2580                                 /* so for a while no zero-copy! */
2581                                 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2582                                 if (unlikely(net_ratelimit()))
2583                                         dev_err(priv->device,
2584                                                 "fail to alloc skb entry %d\n",
2585                                                 entry);
2586                                 break;
2587                         }
2588
2589                         priv->rx_skbuff[entry] = skb;
2590                         priv->rx_skbuff_dma[entry] =
2591                             dma_map_single(priv->device, skb->data, bfsize,
2592                                            DMA_FROM_DEVICE);
2593                         if (dma_mapping_error(priv->device,
2594                                               priv->rx_skbuff_dma[entry])) {
2595                                 netdev_err(priv->dev, "Rx DMA map failed\n");
2596                                 dev_kfree_skb(skb);
2597                                 break;
2598                         }
2599
2600                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2601                                 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2602                                 p->des1 = 0;
2603                         } else {
2604                                 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2605                         }
2606                         if (priv->hw->mode->refill_desc3)
2607                                 priv->hw->mode->refill_desc3(priv, p);
2608
2609                         if (priv->rx_zeroc_thresh > 0)
2610                                 priv->rx_zeroc_thresh--;
2611
2612                         netif_dbg(priv, rx_status, priv->dev,
2613                                   "refill entry #%d\n", entry);
2614                 }
2615                 dma_wmb();
2616
2617                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2618                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2619                 else
2620                         priv->hw->desc->set_rx_owner(p);
2621
2622                 dma_wmb();
2623
2624                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2625         }
2626         priv->dirty_rx = entry;
2627 }
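
/* Editor's sketch (illustrative, assuming the usual ring accounting of this
 * driver and a power-of-two ring size): stmmac_rx_dirty() reports how many
 * descriptors the CPU has consumed but not yet handed back to the DMA, the
 * wrap-aware distance from dirty_rx up to cur_rx:
 */
static inline unsigned int example_rx_dirty(unsigned int dirty_rx,
                                            unsigned int cur_rx,
                                            unsigned int ring_size)
{
        /* 0 means nothing to refill, every descriptor is DMA-owned */
        return (cur_rx - dirty_rx) & (ring_size - 1);
}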
2628
2629 /**
2630  * stmmac_rx - manage the receive process
2631  * @priv: driver private structure
2632  * @limit: napi budget.
2633  * Description: this is the function called by the napi poll method.
2634  * It gets all the frames inside the ring.
2635  */
2636 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2637 {
2638         unsigned int entry = priv->cur_rx;
2639         unsigned int next_entry;
2640         unsigned int count = 0;
2641         int coe = priv->hw->rx_csum;
2642
2643         if (netif_msg_rx_status(priv)) {
2644                 void *rx_head;
2645
2646                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2647                 if (priv->extend_desc)
2648                         rx_head = (void *)priv->dma_erx;
2649                 else
2650                         rx_head = (void *)priv->dma_rx;
2651
2652                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2653         }
2654         while (count < limit) {
2655                 int status;
2656                 struct dma_desc *p;
2657                 struct dma_desc *np;
2658
2659                 if (priv->extend_desc)
2660                         p = (struct dma_desc *)(priv->dma_erx + entry);
2661                 else
2662                         p = priv->dma_rx + entry;
2663
2664                 /* read the status of the incoming frame */
2665                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2666                                                    &priv->xstats, p);
2667                 /* check if managed by the DMA otherwise go ahead */
2668                 if (unlikely(status & dma_own))
2669                         break;
2670
2671                 count++;
2672
2673                 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2674                 next_entry = priv->cur_rx;
2675
2676                 if (priv->extend_desc)
2677                         np = (struct dma_desc *)(priv->dma_erx + next_entry);
2678                 else
2679                         np = priv->dma_rx + next_entry;
2680
2681                 prefetch(np);
2682
2683                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2684                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2685                                                            &priv->xstats,
2686                                                            priv->dma_erx +
2687                                                            entry);
2688                 if (unlikely(status == discard_frame)) {
2689                         priv->dev->stats.rx_errors++;
2690                         if (priv->hwts_rx_en && !priv->extend_desc) {
2691                                 /* DESC2 & DESC3 will be overwritten by device
2692                                  * with timestamp value, hence reinitialize
2693                                  * them in stmmac_rx_refill() function so that
2694                                  * device can reuse it.
2695                                  */
2696                                 priv->rx_skbuff[entry] = NULL;
2697                                 dma_unmap_single(priv->device,
2698                                                  priv->rx_skbuff_dma[entry],
2699                                                  priv->dma_buf_sz,
2700                                                  DMA_FROM_DEVICE);
2701                         }
2702                 } else {
2703                         struct sk_buff *skb;
2704                         int frame_len;
2705                         unsigned int des;
2706
2707                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2708                                 des = le32_to_cpu(p->des0);
2709                         else
2710                                 des = le32_to_cpu(p->des2);
2711
2712                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2713
2714                         /*  If frame length is greater than skb buffer size
2715                          *  (preallocated during init) then the packet is
2716                          *  ignored
2717                          */
2718                         if (frame_len > priv->dma_buf_sz) {
2719                                 netdev_err(priv->dev,
2720                                            "len %d larger than size (%d)\n",
2721                                            frame_len, priv->dma_buf_sz);
2722                                 priv->dev->stats.rx_length_errors++;
2723                                 break;
2724                         }
2725
2726                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2727                          * Type frames (LLC/LLC-SNAP)
2728                          */
2729                         if (unlikely(status != llc_snap))
2730                                 frame_len -= ETH_FCS_LEN;
2731
2732                         if (netif_msg_rx_status(priv)) {
2733                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2734                                            p, entry, des);
2735                                 if (frame_len > ETH_FRAME_LEN)
2736                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2737                                                    frame_len, status);
2738                         }
2739
2740                         /* Zero-copy is always used on GMAC4, whatever the
2741                          * frame size, because the used descriptors always
2742                          * have to be refilled.
2743                          */
2744                         if (unlikely(!priv->plat->has_gmac4 &&
2745                                      ((frame_len < priv->rx_copybreak) ||
2746                                      stmmac_rx_threshold_count(priv)))) {
2747                                 skb = netdev_alloc_skb_ip_align(priv->dev,
2748                                                                 frame_len);
2749                                 if (unlikely(!skb)) {
2750                                         if (net_ratelimit())
2751                                                 dev_warn(priv->device,
2752                                                          "packet dropped\n");
2753                                         priv->dev->stats.rx_dropped++;
2754                                         break;
2755                                 }
2756
2757                                 dma_sync_single_for_cpu(priv->device,
2758                                                         priv->rx_skbuff_dma
2759                                                         [entry], frame_len,
2760                                                         DMA_FROM_DEVICE);
2761                                 skb_copy_to_linear_data(skb,
2762                                                         priv->
2763                                                         rx_skbuff[entry]->data,
2764                                                         frame_len);
2765
2766                                 skb_put(skb, frame_len);
2767                                 dma_sync_single_for_device(priv->device,
2768                                                            priv->rx_skbuff_dma
2769                                                            [entry], frame_len,
2770                                                            DMA_FROM_DEVICE);
2771                         } else {
2772                                 skb = priv->rx_skbuff[entry];
2773                                 if (unlikely(!skb)) {
2774                                         netdev_err(priv->dev,
2775                                                    "%s: Inconsistent Rx chain\n",
2776                                                    priv->dev->name);
2777                                         priv->dev->stats.rx_dropped++;
2778                                         break;
2779                                 }
2780                                 prefetch(skb->data - NET_IP_ALIGN);
2781                                 priv->rx_skbuff[entry] = NULL;
2782                                 priv->rx_zeroc_thresh++;
2783
2784                                 skb_put(skb, frame_len);
2785                                 dma_unmap_single(priv->device,
2786                                                  priv->rx_skbuff_dma[entry],
2787                                                  priv->dma_buf_sz,
2788                                                  DMA_FROM_DEVICE);
2789                         }
2790
2791                         if (netif_msg_pktdata(priv)) {
2792                                 netdev_dbg(priv->dev, "frame received (%d bytes)",
2793                                            frame_len);
2794                                 print_pkt(skb->data, frame_len);
2795                         }
2796
2797                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
2798
2799                         stmmac_rx_vlan(priv->dev, skb);
2800
2801                         skb->protocol = eth_type_trans(skb, priv->dev);
2802
2803                         if (unlikely(!coe))
2804                                 skb_checksum_none_assert(skb);
2805                         else
2806                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2807
2808                         napi_gro_receive(&priv->napi, skb);
2809
2810                         priv->dev->stats.rx_packets++;
2811                         priv->dev->stats.rx_bytes += frame_len;
2812                 }
2813                 entry = next_entry;
2814         }
2815
2816         stmmac_rx_refill(priv);
2817
2818         priv->xstats.rx_pkt_n += count;
2819
2820         return count;
2821 }
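
/*
 * Editor's note: STMMAC_GET_ENTRY (from stmmac.h) advances a ring index
 * with a mask rather than a modulo, which is only correct because
 * DMA_RX_SIZE and DMA_TX_SIZE are powers of two.  A sketch, assuming
 * the usual definition:
 *
 *	#define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))
 *
 * e.g. with size = 512, entry 511 wraps back to entry 0.
 */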
2822
2823 /**
2824  *  stmmac_poll - stmmac poll method (NAPI)
2825  *  @napi : pointer to the napi structure.
2826  *  @budget : maximum number of packets that the current CPU can receive from
2827  *            all interfaces.
2828  *  Description:
2829  *  Processes incoming frames and cleans up completed TX resources.
2830  */
2831 static int stmmac_poll(struct napi_struct *napi, int budget)
2832 {
2833         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2834         int work_done = 0;
2835         u32 chan = STMMAC_CHAN0;
2836
2837         priv->xstats.napi_poll++;
2838         stmmac_tx_clean(priv);
2839
2840         work_done = stmmac_rx(priv, budget);
2841         if (work_done < budget) {
2842                 napi_complete_done(napi, work_done);
2843                 stmmac_enable_dma_irq(priv, chan);
2844         }
2845         return work_done;
2846 }
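
/*
 * Editor's note: stmmac_poll() follows the canonical NAPI contract: the
 * DMA interrupt stays disabled while polling, and only when fewer
 * packets than the budget were processed does the driver call
 * napi_complete_done() and re-arm the per-channel DMA interrupt via
 * stmmac_enable_dma_irq().
 */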
2847
2848 /**
2849  *  stmmac_tx_timeout
2850  *  @dev : Pointer to net device structure
2851  *  Description: this function is called when a packet transmission fails to
2852  *   complete within a reasonable time. The driver will mark the error in the
2853  *   netdev structure and arrange for the device to be reset to a sane state
2854  *   in order to transmit a new packet.
2855  */
2856 static void stmmac_tx_timeout(struct net_device *dev)
2857 {
2858         struct stmmac_priv *priv = netdev_priv(dev);
2859
2860         /* Clear Tx resources and restart transmitting again */
2861         stmmac_tx_err(priv);
2862 }
2863
2864 /**
2865  *  stmmac_set_rx_mode - entry point for multicast addressing
2866  *  @dev : pointer to the device structure
2867  *  Description:
2868  *  This function is a driver entry point which gets called by the kernel
2869  *  whenever multicast addresses must be enabled/disabled.
2870  *  Return value:
2871  *  void.
2872  */
2873 static void stmmac_set_rx_mode(struct net_device *dev)
2874 {
2875         struct stmmac_priv *priv = netdev_priv(dev);
2876
2877         priv->hw->mac->set_filter(priv->hw, dev);
2878 }
2879
2880 /**
2881  *  stmmac_change_mtu - entry point to change MTU size for the device.
2882  *  @dev : device pointer.
2883  *  @new_mtu : the new MTU size for the device.
2884  * Description: the Maximum Transmission Unit (MTU) is used by the network layer
2885  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2886  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2887  *  Return value:
2888  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2889  *  file on failure.
2890  */
2891 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2892 {
2893         struct stmmac_priv *priv = netdev_priv(dev);
2894
2895         if (netif_running(dev)) {
2896                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
2897                 return -EBUSY;
2898         }
2899
2900         dev->mtu = new_mtu;
2901
2902         netdev_update_features(dev);
2903
2904         return 0;
2905 }
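
/*
 * Editor's note: because stmmac_change_mtu() returns -EBUSY while the
 * interface is running, the MTU can only be changed with the link
 * administratively down, e.g. (assuming the iproute2 tools and a value
 * inside the [min_mtu, max_mtu] range):
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 7200
 *	ip link set dev eth0 up
 */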
2906
2907 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2908                                              netdev_features_t features)
2909 {
2910         struct stmmac_priv *priv = netdev_priv(dev);
2911
2912         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2913                 features &= ~NETIF_F_RXCSUM;
2914
2915         if (!priv->plat->tx_coe)
2916                 features &= ~NETIF_F_CSUM_MASK;
2917
2918         /* Some GMAC devices have buggy Jumbo frame support that
2919          * requires the Tx COE to be disabled for oversized frames
2920          * (due to limited buffer sizes). In this case we disable
2921          * the TX csum insertion in the TDES and do not use SF.
2922          */
2923         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2924                 features &= ~NETIF_F_CSUM_MASK;
2925
2926         /* Disable TSO if requested via ethtool */
2927         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2928                 if (features & NETIF_F_TSO)
2929                         priv->tso = true;
2930                 else
2931                         priv->tso = false;
2932         }
2933
2934         return features;
2935 }
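
/*
 * Editor's note: ndo_fix_features/ndo_set_features run when offloads
 * are toggled from user space, e.g. (assuming the usual ethtool CLI):
 *
 *	ethtool -K eth0 rx off		# drops NETIF_F_RXCSUM
 *	ethtool -K eth0 tso off		# clears NETIF_F_TSO, so priv->tso
 *					# becomes false in fix_features
 */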
2936
2937 static int stmmac_set_features(struct net_device *netdev,
2938                                netdev_features_t features)
2939 {
2940         struct stmmac_priv *priv = netdev_priv(netdev);
2941
2942         /* Keep the COE type if RX checksumming is supported */
2943         if (features & NETIF_F_RXCSUM)
2944                 priv->hw->rx_csum = priv->plat->rx_coe;
2945         else
2946                 priv->hw->rx_csum = 0;
2947         /* No check needed here: rx_coe has been validated earlier and
2948          * will have been fixed up if there was an issue.
2949          */
2950         priv->hw->mac->rx_ipc(priv->hw);
2951
2952         return 0;
2953 }
2954
2955 /**
2956  *  stmmac_interrupt - main ISR
2957  *  @irq: interrupt number.
2958  *  @dev_id: to pass the net device pointer.
2959  *  Description: this is the main driver interrupt service routine.
2960  *  It can call:
2961  *  o DMA service routine (to manage incoming frame reception and transmission
2962  *    status)
2963  *  o Core interrupts to manage: remote wake-up, management counter, LPI
2964  *    interrupts.
2965  */
2966 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2967 {
2968         struct net_device *dev = (struct net_device *)dev_id;
2969         struct stmmac_priv *priv;
2970
2971         if (unlikely(!dev)) {		/* check before netdev_priv() use */
2972                 pr_err("%s: invalid dev pointer\n", __func__);
2973                 return IRQ_NONE;
2974         }
2975         priv = netdev_priv(dev);
2976         if (priv->irq_wake)
2977                 pm_wakeup_event(priv->device, 0);
2978
2979         /* To handle GMAC own interrupts */
2980         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2981                 int status = priv->hw->mac->host_irq_status(priv->hw,
2982                                                             &priv->xstats);
2983
2984                 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2985                         status |= priv->hw->mac->host_mtl_irq_status(priv->hw,
2986                                                                 STMMAC_CHAN0);
2987
2988                 if (unlikely(status)) {
2989                         /* For LPI we need to save the tx status */
2990                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2991                                 priv->tx_path_in_lpi_mode = true;
2992                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2993                                 priv->tx_path_in_lpi_mode = false;
2994                         if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2995                                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2996                                                         priv->rx_tail_addr,
2997                                                         STMMAC_CHAN0);
2998                 }
2999
3000                 /* PCS link status */
3001                 if (priv->hw->pcs) {
3002                         if (priv->xstats.pcs_link)
3003                                 netif_carrier_on(dev);
3004                         else
3005                                 netif_carrier_off(dev);
3006                 }
3007         }
3008
3009         /* To handle DMA interrupts */
3010         stmmac_dma_interrupt(priv);
3011
3012         return IRQ_HANDLED;
3013 }
3014
3015 #ifdef CONFIG_NET_POLL_CONTROLLER
3016 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3017  * to allow network I/O with interrupts disabled.
3018  */
3019 static void stmmac_poll_controller(struct net_device *dev)
3020 {
3021         disable_irq(dev->irq);
3022         stmmac_interrupt(dev->irq, dev);
3023         enable_irq(dev->irq);
3024 }
3025 #endif
3026
3027 /**
3028  *  stmmac_ioctl - Entry point for the Ioctl
3029  *  @dev: Device pointer.
3030  *  @rq: An IOCTL-specific structure that can contain a pointer to
3031  *  a proprietary structure used to pass information to the driver.
3032  *  @cmd: IOCTL command
3033  *  Description:
3034  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3035  */
3036 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3037 {
3038         int ret = -EOPNOTSUPP;
3039
3040         if (!netif_running(dev))
3041                 return -EINVAL;
3042
3043         switch (cmd) {
3044         case SIOCGMIIPHY:
3045         case SIOCGMIIREG:
3046         case SIOCSMIIREG:
3047                 if (!dev->phydev)
3048                         return -EINVAL;
3049                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3050                 break;
3051         case SIOCSHWTSTAMP:
3052                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3053                 break;
3054         default:
3055                 break;
3056         }
3057
3058         return ret;
3059 }
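
/*
 * Editor's note: a standalone user-space sketch of exercising the
 * SIOCSHWTSTAMP branch handled above; the interface name and filter
 * choice are assumptions, and error handling is trimmed for brevity.
 */
#if 0	/* illustrative user-space code, not compiled here */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int demo_enable_rx_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* lands in stmmac_hwtstamp_ioctl() via stmmac_ioctl() */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif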
3060
3061 #ifdef CONFIG_DEBUG_FS
3062 static struct dentry *stmmac_fs_dir;
3063
3064 static void sysfs_display_ring(void *head, int size, int extend_desc,
3065                                struct seq_file *seq)
3066 {
3067         int i;
3068         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3069         struct dma_desc *p = (struct dma_desc *)head;
3070
3071         for (i = 0; i < size; i++) {
3072                 if (extend_desc) {
3073                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3074                                    i, (unsigned int)virt_to_phys(ep),
3075                                    le32_to_cpu(ep->basic.des0),
3076                                    le32_to_cpu(ep->basic.des1),
3077                                    le32_to_cpu(ep->basic.des2),
3078                                    le32_to_cpu(ep->basic.des3));
3079                         ep++;
3080                 } else {
3081                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3082                                    i, (unsigned int)virt_to_phys(p),
3083                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3084                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3085                         p++;
3086                 }
3087                 seq_putc(seq, '\n');
3088         }
3089 }
3090
3091 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3092 {
3093         struct net_device *dev = seq->private;
3094         struct stmmac_priv *priv = netdev_priv(dev);
3095
3096         if (priv->extend_desc) {
3097                 seq_printf(seq, "Extended RX descriptor ring:\n");
3098                 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
3099                 seq_printf(seq, "Extended TX descriptor ring:\n");
3100                 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
3101         } else {
3102                 seq_printf(seq, "RX descriptor ring:\n");
3103                 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
3104                 seq_printf(seq, "TX descriptor ring:\n");
3105                 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
3106         }
3107
3108         return 0;
3109 }
3110
3111 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3112 {
3113         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3114 }
3115
3116 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3117
3118 static const struct file_operations stmmac_rings_status_fops = {
3119         .owner = THIS_MODULE,
3120         .open = stmmac_sysfs_ring_open,
3121         .read = seq_read,
3122         .llseek = seq_lseek,
3123         .release = single_release,
3124 };
3125
3126 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3127 {
3128         struct net_device *dev = seq->private;
3129         struct stmmac_priv *priv = netdev_priv(dev);
3130
3131         if (!priv->hw_cap_support) {
3132                 seq_printf(seq, "DMA HW features not supported\n");
3133                 return 0;
3134         }
3135
3136         seq_printf(seq, "==============================\n");
3137         seq_printf(seq, "\tDMA HW features\n");
3138         seq_printf(seq, "==============================\n");
3139
3140         seq_printf(seq, "\t10/100 Mbps: %s\n",
3141                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3142         seq_printf(seq, "\t1000 Mbps: %s\n",
3143                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3144         seq_printf(seq, "\tHalf duplex: %s\n",
3145                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3146         seq_printf(seq, "\tHash Filter: %s\n",
3147                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3148         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3149                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3150         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3151                    (priv->dma_cap.pcs) ? "Y" : "N");
3152         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3153                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3154         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3155                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3156         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3157                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3158         seq_printf(seq, "\tRMON module: %s\n",
3159                    (priv->dma_cap.rmon) ? "Y" : "N");
3160         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3161                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3162         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3163                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3164         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3165                    (priv->dma_cap.eee) ? "Y" : "N");
3166         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3167         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3168                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3169         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3170                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3171                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3172         } else {
3173                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3174                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3175                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3176                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3177         }
3178         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3179                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3180         seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3181                    priv->dma_cap.number_rx_channel);
3182         seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3183                    priv->dma_cap.number_tx_channel);
3184         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3185                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3186
3187         return 0;
3188 }
3189
3190 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3191 {
3192         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3193 }
3194
3195 static const struct file_operations stmmac_dma_cap_fops = {
3196         .owner = THIS_MODULE,
3197         .open = stmmac_sysfs_dma_cap_open,
3198         .read = seq_read,
3199         .llseek = seq_lseek,
3200         .release = single_release,
3201 };
3202
3203 static int stmmac_init_fs(struct net_device *dev)
3204 {
3205         struct stmmac_priv *priv = netdev_priv(dev);
3206
3207         /* Create per netdev entries */
3208         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3209
3210         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3211                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3212
3213                 return -ENOMEM;
3214         }
3215
3216         /* Entry to report DMA RX/TX rings */
3217         priv->dbgfs_rings_status =
3218                 debugfs_create_file("descriptors_status", S_IRUGO,
3219                                     priv->dbgfs_dir, dev,
3220                                     &stmmac_rings_status_fops);
3221
3222         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3223                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3224                 debugfs_remove_recursive(priv->dbgfs_dir);
3225
3226                 return -ENOMEM;
3227         }
3228
3229         /* Entry to report the DMA HW features */
3230         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3231                                             priv->dbgfs_dir,
3232                                             dev, &stmmac_dma_cap_fops);
3233
3234         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3235                 netdev_err(priv->dev, "ERROR creating stmmac DMA capability debugfs file\n");
3236                 debugfs_remove_recursive(priv->dbgfs_dir);
3237
3238                 return -ENOMEM;
3239         }
3240
3241         return 0;
3242 }
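
/*
 * Editor's note: once debugfs is mounted and the interface registered,
 * the two entries created above can be read directly, e.g.:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */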
3243
3244 static void stmmac_exit_fs(struct net_device *dev)
3245 {
3246         struct stmmac_priv *priv = netdev_priv(dev);
3247
3248         debugfs_remove_recursive(priv->dbgfs_dir);
3249 }
3250 #endif /* CONFIG_DEBUG_FS */
3251
3252 static const struct net_device_ops stmmac_netdev_ops = {
3253         .ndo_open = stmmac_open,
3254         .ndo_start_xmit = stmmac_xmit,
3255         .ndo_stop = stmmac_release,
3256         .ndo_change_mtu = stmmac_change_mtu,
3257         .ndo_fix_features = stmmac_fix_features,
3258         .ndo_set_features = stmmac_set_features,
3259         .ndo_set_rx_mode = stmmac_set_rx_mode,
3260         .ndo_tx_timeout = stmmac_tx_timeout,
3261         .ndo_do_ioctl = stmmac_ioctl,
3262 #ifdef CONFIG_NET_POLL_CONTROLLER
3263         .ndo_poll_controller = stmmac_poll_controller,
3264 #endif
3265         .ndo_set_mac_address = eth_mac_addr,
3266 };
3267
3268 /**
3269  *  stmmac_hw_init - Init the MAC device
3270  *  @priv: driver private structure
3271  *  Description: this function is to configure the MAC device according to
3272  *  some platform parameters or the HW capability register. It prepares the
3273  *  driver to use either ring or chain modes and to setup either enhanced or
3274  *  normal descriptors.
3275  */
3276 static int stmmac_hw_init(struct stmmac_priv *priv)
3277 {
3278         struct mac_device_info *mac;
3279
3280         /* Identify the MAC HW device */
3281         if (priv->plat->has_gmac) {
3282                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3283                 mac = dwmac1000_setup(priv->ioaddr,
3284                                       priv->plat->multicast_filter_bins,
3285                                       priv->plat->unicast_filter_entries,
3286                                       &priv->synopsys_id);
3287         } else if (priv->plat->has_gmac4) {
3288                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3289                 mac = dwmac4_setup(priv->ioaddr,
3290                                    priv->plat->multicast_filter_bins,
3291                                    priv->plat->unicast_filter_entries,
3292                                    &priv->synopsys_id);
3293         } else {
3294                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3295         }
3296         if (!mac)
3297                 return -ENOMEM;
3298
3299         priv->hw = mac;
3300
3301         /* To use the chained or ring mode */
3302         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3303                 priv->hw->mode = &dwmac4_ring_mode_ops;
3304         } else {
3305                 if (chain_mode) {
3306                         priv->hw->mode = &chain_mode_ops;
3307                         dev_info(priv->device, "Chain mode enabled\n");
3308                         priv->mode = STMMAC_CHAIN_MODE;
3309                 } else {
3310                         priv->hw->mode = &ring_mode_ops;
3311                         dev_info(priv->device, "Ring mode enabled\n");
3312                         priv->mode = STMMAC_RING_MODE;
3313                 }
3314         }
3315
3316         /* Get the HW capability (on GMAC cores newer than 3.50a) */
3317         priv->hw_cap_support = stmmac_get_hw_features(priv);
3318         if (priv->hw_cap_support) {
3319                 dev_info(priv->device, "DMA HW capability register supported\n");
3320
3321                 /* Some GMAC/DMA configuration fields passed in through
3322                  * the platform data (e.g. enh_desc, tx_coe) can be
3323                  * overridden with the values from the HW capability
3324                  * register, if supported.
3325                  */
3326                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3327                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3328                 priv->hw->pmt = priv->plat->pmt;
3329
3330                 /* TXCOE doesn't work in thresh DMA mode */
3331                 if (priv->plat->force_thresh_dma_mode)
3332                         priv->plat->tx_coe = 0;
3333                 else
3334                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3335
3336                 /* In case of GMAC4 rx_coe is from HW cap register. */
3337                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3338
3339                 if (priv->dma_cap.rx_coe_type2)
3340                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3341                 else if (priv->dma_cap.rx_coe_type1)
3342                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3343
3344         } else {
3345                 dev_info(priv->device, "No HW DMA feature register supported\n");
3346         }
3347
3348         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3349         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3350                 priv->hw->desc = &dwmac4_desc_ops;
3351         else
3352                 stmmac_selec_desc_mode(priv);
3353
3354         if (priv->plat->rx_coe) {
3355                 priv->hw->rx_csum = priv->plat->rx_coe;
3356                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3357                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3358                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3359         }
3360         if (priv->plat->tx_coe)
3361                 dev_info(priv->device, "TX Checksum insertion supported\n");
3362
3363         if (priv->plat->pmt) {
3364                 dev_info(priv->device, "Wake-Up On LAN supported\n");
3365                 device_set_wakeup_capable(priv->device, 1);
3366         }
3367
3368         if (priv->dma_cap.tsoen)
3369                 dev_info(priv->device, "TSO supported\n");
3370
3371         return 0;
3372 }
3373
3374 /**
3375  * stmmac_dvr_probe
3376  * @device: device pointer
3377  * @plat_dat: platform data pointer
3378  * @res: stmmac resource pointer
3379  * Description: this is the main probe function, used to
3380  * call alloc_etherdev and allocate the private structure.
3381  * Return:
3382  * returns 0 on success, otherwise errno.
3383  */
3384 int stmmac_dvr_probe(struct device *device,
3385                      struct plat_stmmacenet_data *plat_dat,
3386                      struct stmmac_resources *res)
3387 {
3388         int ret = 0;
3389         struct net_device *ndev = NULL;
3390         struct stmmac_priv *priv;
3391
3392         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3393         if (!ndev)
3394                 return -ENOMEM;
3395
3396         SET_NETDEV_DEV(ndev, device);
3397
3398         priv = netdev_priv(ndev);
3399         priv->device = device;
3400         priv->dev = ndev;
3401
3402         stmmac_set_ethtool_ops(ndev);
3403         priv->pause = pause;
3404         priv->plat = plat_dat;
3405         priv->ioaddr = res->addr;
3406         priv->dev->base_addr = (unsigned long)res->addr;
3407
3408         priv->dev->irq = res->irq;
3409         priv->wol_irq = res->wol_irq;
3410         priv->lpi_irq = res->lpi_irq;
3411
3412         if (res->mac)
3413                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3414
3415         dev_set_drvdata(device, priv->dev);
3416
3417         /* Verify driver arguments */
3418         stmmac_verify_args();
3419
3420         /* Override with kernel parameters if supplied XXX CRS XXX
3421          * this needs to have multiple instances
3422          */
3423         if ((phyaddr >= 0) && (phyaddr <= 31))
3424                 priv->plat->phy_addr = phyaddr;
3425
3426         if (priv->plat->stmmac_rst)
3427                 reset_control_deassert(priv->plat->stmmac_rst);
3428
3429         /* Init MAC and get the capabilities */
3430         ret = stmmac_hw_init(priv);
3431         if (ret)
3432                 goto error_hw_init;
3433
3434         ndev->netdev_ops = &stmmac_netdev_ops;
3435
3436         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3437                             NETIF_F_RXCSUM;
3438
3439         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3440                 ndev->hw_features |= NETIF_F_TSO;
3441                 priv->tso = true;
3442                 dev_info(priv->device, "TSO feature enabled\n");
3443         }
3444         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3445         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3446 #ifdef STMMAC_VLAN_TAG_USED
3447         /* Both mac100 and gmac support receive VLAN tag detection */
3448         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3449 #endif
3450         priv->msg_enable = netif_msg_init(debug, default_msg_level);
3451
3452         /* MTU range: 46 - hw-specific max */
3453         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3454         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3455                 ndev->max_mtu = JUMBO_LEN;
3456         else
3457                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3458         /* Do not overwrite ndev->max_mtu if plat->maxmtu is above it, nor
3459          * if plat->maxmtu is below ndev->min_mtu: both are invalid ranges.
3460          */
3461         if ((priv->plat->maxmtu < ndev->max_mtu) &&
3462             (priv->plat->maxmtu >= ndev->min_mtu))
3463                 ndev->max_mtu = priv->plat->maxmtu;
3464         else if (priv->plat->maxmtu < ndev->min_mtu)
3465                 dev_warn(priv->device,
3466                          "%s: warning: maxmtu has an invalid value (%d)\n",
3467                          __func__, priv->plat->maxmtu);
3468
3469         if (flow_ctrl)
3470                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
3471
3472         /* The Rx Watchdog is available on cores newer than 3.40.
3473          * In some cases, for example on buggy HW, this feature
3474          * has to be disabled; this can be done by setting the
3475          * riwt_off field in the platform data.
3476          */
3477         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3478                 priv->use_riwt = 1;
3479                 dev_info(priv->device,
3480                          "Enable RX Mitigation via HW Watchdog Timer\n");
3481         }
3482
3483         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3484
3485         spin_lock_init(&priv->lock);
3486
3487         /* If a specific clk_csr value is passed from the platform,
3488          * the CSR Clock Range selection cannot be changed at
3489          * run-time and is fixed. Otherwise the driver tries to set
3490          * the MDC clock dynamically, according to the actual CSR
3491          * clock input.
3492          */
3493         if (!priv->plat->clk_csr)
3494                 stmmac_clk_csr_set(priv);
3495         else
3496                 priv->clk_csr = priv->plat->clk_csr;
3497
3498         stmmac_check_pcs_mode(priv);
3499
3500         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
3501             priv->hw->pcs != STMMAC_PCS_TBI &&
3502             priv->hw->pcs != STMMAC_PCS_RTBI) {
3503                 /* MDIO bus Registration */
3504                 ret = stmmac_mdio_register(ndev);
3505                 if (ret < 0) {
3506                         dev_err(priv->device,
3507                                 "%s: MDIO bus (id: %d) registration failed\n",
3508                                 __func__, priv->plat->bus_id);
3509                         goto error_mdio_register;
3510                 }
3511         }
3512
3513         ret = register_netdev(ndev);
3514         if (ret) {
3515                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3516                         __func__, ret);
3517                 goto error_netdev_register;
3518         }
3519
3520         return ret;
3521
3522 error_netdev_register:
3523         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3524             priv->hw->pcs != STMMAC_PCS_TBI &&
3525             priv->hw->pcs != STMMAC_PCS_RTBI)
3526                 stmmac_mdio_unregister(ndev);
3527 error_mdio_register:
3528         netif_napi_del(&priv->napi);
3529 error_hw_init:
3530         free_netdev(ndev);
3531
3532         return ret;
3533 }
3534 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3535
3536 /**
3537  * stmmac_dvr_remove
3538  * @dev: device pointer
3539  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
3540  * changes the link status and releases the DMA descriptor rings.
3541  */
3542 int stmmac_dvr_remove(struct device *dev)
3543 {
3544         struct net_device *ndev = dev_get_drvdata(dev);
3545         struct stmmac_priv *priv = netdev_priv(ndev);
3546
3547         netdev_info(priv->dev, "%s: removing driver\n", __func__);
3548
3549         priv->hw->dma->stop_rx(priv->ioaddr);
3550         priv->hw->dma->stop_tx(priv->ioaddr);
3551
3552         stmmac_set_mac(priv->ioaddr, false);
3553         netif_carrier_off(ndev);
3554         unregister_netdev(ndev);
3555         if (priv->plat->stmmac_rst)
3556                 reset_control_assert(priv->plat->stmmac_rst);
3557         clk_disable_unprepare(priv->plat->pclk);
3558         clk_disable_unprepare(priv->plat->stmmac_clk);
3559         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3560             priv->hw->pcs != STMMAC_PCS_TBI &&
3561             priv->hw->pcs != STMMAC_PCS_RTBI)
3562                 stmmac_mdio_unregister(ndev);
3563         free_netdev(ndev);
3564
3565         return 0;
3566 }
3567 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3568
3569 /**
3570  * stmmac_suspend - suspend callback
3571  * @dev: device pointer
3572  * Description: this function suspends the device; it is called by the
3573  * platform driver to stop the network queue, release the resources,
3574  * program the PMT register (for WoL) and clean up driver state.
3575  */
3576 int stmmac_suspend(struct device *dev)
3577 {
3578         struct net_device *ndev = dev_get_drvdata(dev);
3579         struct stmmac_priv *priv = netdev_priv(ndev);
3580         unsigned long flags;
3581
3582         if (!ndev || !netif_running(ndev))
3583                 return 0;
3584
3585         if (ndev->phydev)
3586                 phy_stop(ndev->phydev);
3587
3588         spin_lock_irqsave(&priv->lock, flags);
3589
3590         netif_device_detach(ndev);
3591         netif_stop_queue(ndev);
3592
3593         napi_disable(&priv->napi);
3594
3595         /* Stop TX/RX DMA */
3596         priv->hw->dma->stop_tx(priv->ioaddr);
3597         priv->hw->dma->stop_rx(priv->ioaddr);
3598
3599         /* Enable Power down mode by programming the PMT regs */
3600         if (device_may_wakeup(priv->device)) {
3601                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3602                 priv->irq_wake = 1;
3603         } else {
3604                 stmmac_set_mac(priv->ioaddr, false);
3605                 pinctrl_pm_select_sleep_state(priv->device);
3606                 /* Disable clocks since PM wake-up is off */
3607                 clk_disable(priv->plat->pclk);
3608                 clk_disable(priv->plat->stmmac_clk);
3609         }
3610         spin_unlock_irqrestore(&priv->lock, flags);
3611
3612         priv->oldlink = 0;
3613         priv->speed = SPEED_UNKNOWN;
3614         priv->oldduplex = DUPLEX_UNKNOWN;
3615         return 0;
3616 }
3617 EXPORT_SYMBOL_GPL(stmmac_suspend);
3618
3619 /**
3620  * stmmac_resume - resume callback
3621  * @dev: device pointer
3622  * Description: on resume this function is invoked to set up the DMA and
3623  * the core in a usable state.
3624  */
3625 int stmmac_resume(struct device *dev)
3626 {
3627         struct net_device *ndev = dev_get_drvdata(dev);
3628         struct stmmac_priv *priv = netdev_priv(ndev);
3629         unsigned long flags;
3630
3631         if (!netif_running(ndev))
3632                 return 0;
3633
3634         /* The Power Down bit in the PMT register is cleared
3635          * automatically as soon as a magic packet or a Wake-up frame
3636          * is received. It is nevertheless better to clear this bit
3637          * manually, because it can cause problems when resuming
3638          * from other devices (e.g. a serial console).
3639          */
3640         if (device_may_wakeup(priv->device)) {
3641                 spin_lock_irqsave(&priv->lock, flags);
3642                 priv->hw->mac->pmt(priv->hw, 0);
3643                 spin_unlock_irqrestore(&priv->lock, flags);
3644                 priv->irq_wake = 0;
3645         } else {
3646                 pinctrl_pm_select_default_state(priv->device);
3647                 /* Enable the clocks previously disabled in suspend */
3648                 clk_enable(priv->plat->stmmac_clk);
3649                 clk_enable(priv->plat->pclk);
3650                 /* reset the phy so that it's ready */
3651                 if (priv->mii)
3652                         stmmac_mdio_reset(priv->mii);
3653         }
3654
3655         netif_device_attach(ndev);
3656
3657         spin_lock_irqsave(&priv->lock, flags);
3658
3659         priv->cur_rx = 0;
3660         priv->dirty_rx = 0;
3661         priv->dirty_tx = 0;
3662         priv->cur_tx = 0;
3663         /* Reset the private MSS value to force MSS context settings at
3664          * the next TSO xmit (only used for GMAC4).
3665          */
3666         priv->mss = 0;
3667
3668         stmmac_clear_descriptors(priv);
3669
3670         stmmac_hw_setup(ndev, false);
3671         stmmac_init_tx_coalesce(priv);
3672         stmmac_set_rx_mode(ndev);
3673
3674         napi_enable(&priv->napi);
3675
3676         netif_start_queue(ndev);
3677
3678         spin_unlock_irqrestore(&priv->lock, flags);
3679
3680         if (ndev->phydev)
3681                 phy_start(ndev->phydev);
3682
3683         return 0;
3684 }
3685 EXPORT_SYMBOL_GPL(stmmac_resume);
3686
3687 #ifndef MODULE
3688 static int __init stmmac_cmdline_opt(char *str)
3689 {
3690         char *opt;
3691
3692         if (!str || !*str)
3693                 return -EINVAL;
3694         while ((opt = strsep(&str, ",")) != NULL) {
3695                 if (!strncmp(opt, "debug:", 6)) {
3696                         if (kstrtoint(opt + 6, 0, &debug))
3697                                 goto err;
3698                 } else if (!strncmp(opt, "phyaddr:", 8)) {
3699                         if (kstrtoint(opt + 8, 0, &phyaddr))
3700                                 goto err;
3701                 } else if (!strncmp(opt, "buf_sz:", 7)) {
3702                         if (kstrtoint(opt + 7, 0, &buf_sz))
3703                                 goto err;
3704                 } else if (!strncmp(opt, "tc:", 3)) {
3705                         if (kstrtoint(opt + 3, 0, &tc))
3706                                 goto err;
3707                 } else if (!strncmp(opt, "watchdog:", 9)) {
3708                         if (kstrtoint(opt + 9, 0, &watchdog))
3709                                 goto err;
3710                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3711                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3712                                 goto err;
3713                 } else if (!strncmp(opt, "pause:", 6)) {
3714                         if (kstrtoint(opt + 6, 0, &pause))
3715                                 goto err;
3716                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3717                         if (kstrtoint(opt + 10, 0, &eee_timer))
3718                                 goto err;
3719                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3720                         if (kstrtoint(opt + 11, 0, &chain_mode))
3721                                 goto err;
3722                 }
3723         }
3724         return 0;
3725
3726 err:
3727         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3728         return -EINVAL;
3729 }
3730
3731 __setup("stmmaceth=", stmmac_cmdline_opt);
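
/*
 * Editor's note: example of the option string parsed above, passed on
 * the kernel command line in built-in (non-module) configurations; the
 * values shown are arbitrary:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */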
3732 #endif /* MODULE */
3733
3734 static int __init stmmac_init(void)
3735 {
3736 #ifdef CONFIG_DEBUG_FS
3737         /* Create debugfs main directory if it doesn't exist yet */
3738         if (!stmmac_fs_dir) {
3739                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3740
3741                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3742                         pr_err("ERROR %s, debugfs create directory failed\n",
3743                                STMMAC_RESOURCE_NAME);
3744
3745                         return -ENOMEM;
3746                 }
3747         }
3748 #endif
3749
3750         return 0;
3751 }
3752
3753 static void __exit stmmac_exit(void)
3754 {
3755 #ifdef CONFIG_DEBUG_FS
3756         debugfs_remove_recursive(stmmac_fs_dir);
3757 #endif
3758 }
3759
3760 module_init(stmmac_init)
3761 module_exit(stmmac_exit)
3762
3763 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3764 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3765 MODULE_LICENSE("GPL");