1 /*
2  *  drivers/net/ethernet/freescale/gianfar_ethtool.c
3  *
4  *  Gianfar Ethernet Driver
5  *  Ethtool support for Gianfar Enet
6  *  Based on e1000 ethtool support
7  *
8  *  Author: Andy Fleming
9  *  Maintainer: Kumar Gala
10  *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11  *
12  *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
13  *
14  *  This software may be used and distributed according to
15  *  the terms of the GNU Public License, Version 2, incorporated herein
16  *  by reference.
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/kernel.h>
22 #include <linux/string.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/net_tstamp.h>
30 #include <linux/skbuff.h>
31 #include <linux/spinlock.h>
32 #include <linux/mm.h>
33
34 #include <asm/io.h>
35 #include <asm/irq.h>
36 #include <asm/uaccess.h>
37 #include <linux/module.h>
38 #include <linux/crc32.h>
39 #include <asm/types.h>
40 #include <linux/ethtool.h>
41 #include <linux/mii.h>
42 #include <linux/phy.h>
43 #include <linux/sort.h>
44 #include <linux/if_vlan.h>
45
46 #include "gianfar.h"
47
48 extern void gfar_start(struct net_device *dev);
49 extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
50                               int rx_work_limit);
51
52 #define GFAR_MAX_COAL_USECS 0xffff
53 #define GFAR_MAX_COAL_FRAMES 0xff
54 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
55                             u64 *buf);
56 static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
57 static int gfar_gcoalesce(struct net_device *dev,
58                           struct ethtool_coalesce *cvals);
59 static int gfar_scoalesce(struct net_device *dev,
60                           struct ethtool_coalesce *cvals);
61 static void gfar_gringparam(struct net_device *dev,
62                             struct ethtool_ringparam *rvals);
63 static int gfar_sringparam(struct net_device *dev,
64                            struct ethtool_ringparam *rvals);
65 static void gfar_gdrvinfo(struct net_device *dev,
66                           struct ethtool_drvinfo *drvinfo);
67
68 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
69         "rx-large-frame-errors",
70         "rx-short-frame-errors",
71         "rx-non-octet-errors",
72         "rx-crc-errors",
73         "rx-overrun-errors",
74         "rx-busy-errors",
75         "rx-babbling-errors",
76         "rx-truncated-frames",
77         "ethernet-bus-error",
78         "tx-babbling-errors",
79         "tx-underrun-errors",
80         "rx-skb-missing-errors",
81         "tx-timeout-errors",
82         "tx-rx-64-frames",
83         "tx-rx-65-127-frames",
84         "tx-rx-128-255-frames",
85         "tx-rx-256-511-frames",
86         "tx-rx-512-1023-frames",
87         "tx-rx-1024-1518-frames",
88         "tx-rx-1519-1522-good-vlan",
89         "rx-bytes",
90         "rx-packets",
91         "rx-fcs-errors",
92         "receive-multicast-packet",
93         "receive-broadcast-packet",
94         "rx-control-frame-packets",
95         "rx-pause-frame-packets",
96         "rx-unknown-op-code",
97         "rx-alignment-error",
98         "rx-frame-length-error",
99         "rx-code-error",
100         "rx-carrier-sense-error",
101         "rx-undersize-packets",
102         "rx-oversize-packets",
103         "rx-fragmented-frames",
104         "rx-jabber-frames",
105         "rx-dropped-frames",
106         "tx-byte-counter",
107         "tx-packets",
108         "tx-multicast-packets",
109         "tx-broadcast-packets",
110         "tx-pause-control-frames",
111         "tx-deferral-packets",
112         "tx-excessive-deferral-packets",
113         "tx-single-collision-packets",
114         "tx-multiple-collision-packets",
115         "tx-late-collision-packets",
116         "tx-excessive-collision-packets",
117         "tx-total-collision",
118         "reserved",
119         "tx-dropped-frames",
120         "tx-jabber-frames",
121         "tx-fcs-errors",
122         "tx-control-frames",
123         "tx-oversize-frames",
124         "tx-undersize-frames",
125         "tx-fragmented-frames",
126 };
127
128 /* Fill in a buffer with the strings which correspond to the
129  * stats */
130 static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
131 {
132         struct gfar_private *priv = netdev_priv(dev);
133
134         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
135                 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
136         else
137                 memcpy(buf, stat_gstrings,
138                        GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
139 }
140
141 /* Fill in an array of 64-bit statistics from various sources.
142  * This array will be appended to the end of the ethtool_stats
143  * structure, and returned to user space
144  */
145 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
146                             u64 *buf)
147 {
148         int i;
149         struct gfar_private *priv = netdev_priv(dev);
150         struct gfar __iomem *regs = priv->gfargrp[0].regs;
151         atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
152
153         for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
154                 buf[i] = atomic64_read(&extra[i]);
155
156         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
157                 u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
158
159                 for (; i < GFAR_STATS_LEN; i++, rmon++)
160                         buf[i] = (u64) gfar_read(rmon);
161         }
162 }
163
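/* Report how many strings gfar_gstrings() will emit: the full set when
 * the MAC has hardware RMON counters, otherwise only the software
 * "extra" stats.
 */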
164 static int gfar_sset_count(struct net_device *dev, int sset)
165 {
166         struct gfar_private *priv = netdev_priv(dev);
167
168         switch (sset) {
169         case ETH_SS_STATS:
170                 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
171                         return GFAR_STATS_LEN;
172                 else
173                         return GFAR_EXTRA_STATS_LEN;
174         default:
175                 return -EOPNOTSUPP;
176         }
177 }
178
179 /* Fills in the drvinfo structure with some basic info */
180 static void gfar_gdrvinfo(struct net_device *dev,
181                           struct ethtool_drvinfo *drvinfo)
182 {
183         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
184         strlcpy(drvinfo->version, gfar_driver_version,
185                 sizeof(drvinfo->version));
186         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
187         strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
188         drvinfo->regdump_len = 0;
189         drvinfo->eedump_len = 0;
190 }
191
192
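/* Hand new link settings straight to the attached PHY */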
193 static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
194 {
195         struct gfar_private *priv = netdev_priv(dev);
196         struct phy_device *phydev = priv->phydev;
197
198         if (NULL == phydev)
199                 return -ENODEV;
200
201         return phy_ethtool_sset(phydev, cmd);
202 }
203
204
205 /* Return the current settings in the ethtool_cmd structure */
206 static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
207 {
208         struct gfar_private *priv = netdev_priv(dev);
209         struct phy_device *phydev = priv->phydev;
210         struct gfar_priv_rx_q *rx_queue = NULL;
211         struct gfar_priv_tx_q *tx_queue = NULL;
212
213         if (NULL == phydev)
214                 return -ENODEV;
215         tx_queue = priv->tx_queue[0];
216         rx_queue = priv->rx_queue[0];
217
218         /* etsec-1.7 and older versions have only one set of txic
219          * and rxic regs although they support multiple queues */
220         cmd->maxtxpkt = get_icft_value(tx_queue->txic);
221         cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
222
223         return phy_ethtool_gset(phydev, cmd);
224 }
225
226 /* Return the length of the register structure */
227 static int gfar_reglen(struct net_device *dev)
228 {
229         return sizeof (struct gfar);
230 }
231
232 /* Return a dump of the GFAR register space */
233 static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
234                           void *regbuf)
235 {
236         int i;
237         struct gfar_private *priv = netdev_priv(dev);
238         u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
239         u32 *buf = (u32 *) regbuf;
240
241         for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
242                 buf[i] = gfar_read(&theregs[i]);
243 }
244
245 /* Convert microseconds to ethernet clock ticks, whose length changes
246  * depending on what speed the controller is running at */
247 static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
248                                      unsigned int usecs)
249 {
250         unsigned int count;
251
252         /* The timer is different, depending on the interface speed */
253         switch (priv->phydev->speed) {
254         case SPEED_1000:
255                 count = GFAR_GBIT_TIME;
256                 break;
257         case SPEED_100:
258                 count = GFAR_100_TIME;
259                 break;
260         case SPEED_10:
261         default:
262                 count = GFAR_10_TIME;
263                 break;
264         }
265
266         /* Make sure we return a number greater than 0
267          * if usecs > 0 */
268         return (usecs * 1000 + count - 1) / count;
269 }
270
271 /* Convert ethernet clock ticks to microseconds */
272 static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
273                                      unsigned int ticks)
274 {
275         unsigned int count;
276
277         /* The timer is different, depending on the interface speed */
278         switch (priv->phydev->speed) {
279         case SPEED_1000:
280                 count = GFAR_GBIT_TIME;
281                 break;
282         case SPEED_100:
283                 count = GFAR_100_TIME;
284                 break;
285         case SPEED_10:
286         default:
287                 count = GFAR_10_TIME;
288                 break;
289         }
290
291         /* Make sure we return a number greater than 0
292          * if ticks > 0 */
293         return (ticks * count) / 1000;
294 }
295
296 /* Get the coalescing parameters, and put them in the cvals
297  * structure.  */
298 static int gfar_gcoalesce(struct net_device *dev,
299                           struct ethtool_coalesce *cvals)
300 {
301         struct gfar_private *priv = netdev_priv(dev);
302         struct gfar_priv_rx_q *rx_queue = NULL;
303         struct gfar_priv_tx_q *tx_queue = NULL;
304         unsigned long rxtime;
305         unsigned long rxcount;
306         unsigned long txtime;
307         unsigned long txcount;
308
309         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
310                 return -EOPNOTSUPP;
311
312         if (NULL == priv->phydev)
313                 return -ENODEV;
314
315         rx_queue = priv->rx_queue[0];
316         tx_queue = priv->tx_queue[0];
317
318         rxtime  = get_ictt_value(rx_queue->rxic);
319         rxcount = get_icft_value(rx_queue->rxic);
320         txtime  = get_ictt_value(tx_queue->txic);
321         txcount = get_icft_value(tx_queue->txic);
322         cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
323         cvals->rx_max_coalesced_frames = rxcount;
324
325         cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
326         cvals->tx_max_coalesced_frames = txcount;
327
328         cvals->use_adaptive_rx_coalesce = 0;
329         cvals->use_adaptive_tx_coalesce = 0;
330
331         cvals->pkt_rate_low = 0;
332         cvals->rx_coalesce_usecs_low = 0;
333         cvals->rx_max_coalesced_frames_low = 0;
334         cvals->tx_coalesce_usecs_low = 0;
335         cvals->tx_max_coalesced_frames_low = 0;
336
337         /* When the packet rate is below pkt_rate_high but above
338          * pkt_rate_low (both measured in packets per second) the
339          * normal {rx,tx}_* coalescing parameters are used.
340          */
341
342         /* When the packet rate (measured in packets per second) is
343          * above pkt_rate_high, the {rx,tx}_*_high parameters are
344          * used.
345          */
346         cvals->pkt_rate_high = 0;
347         cvals->rx_coalesce_usecs_high = 0;
348         cvals->rx_max_coalesced_frames_high = 0;
349         cvals->tx_coalesce_usecs_high = 0;
350         cvals->tx_max_coalesced_frames_high = 0;
351
352         /* How often to do adaptive coalescing packet rate sampling,
353          * measured in seconds.  Must not be zero.
354          */
355         cvals->rate_sample_interval = 0;
356
357         return 0;
358 }
359
360 /* Change the coalescing values.
361  * Both cvals->*_usecs and cvals->*_frames have to be > 0
362  * in order for coalescing to be active
363  */
364 static int gfar_scoalesce(struct net_device *dev,
365                           struct ethtool_coalesce *cvals)
366 {
367         struct gfar_private *priv = netdev_priv(dev);
368         int i = 0;
369
370         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
371                 return -EOPNOTSUPP;
372
373         /* Set up rx coalescing */
374         /* For now, we enable/disable coalescing for all queues
375          * together in the eTSEC2 case; this will be modified
376          * along with the ethtool interface
377          */
378         if ((cvals->rx_coalesce_usecs == 0) ||
379             (cvals->rx_max_coalesced_frames == 0)) {
380                 for (i = 0; i < priv->num_rx_queues; i++)
381                         priv->rx_queue[i]->rxcoalescing = 0;
382         } else {
383                 for (i = 0; i < priv->num_rx_queues; i++)
384                         priv->rx_queue[i]->rxcoalescing = 1;
385         }
386
387         if (NULL == priv->phydev)
388                 return -ENODEV;
389
390         /* Check the bounds of the values */
391         if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
392                 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
393                             GFAR_MAX_COAL_USECS);
394                 return -EINVAL;
395         }
396
397         if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
398                 netdev_info(dev, "Coalescing is limited to %d frames\n",
399                             GFAR_MAX_COAL_FRAMES);
400                 return -EINVAL;
401         }
402
403         for (i = 0; i < priv->num_rx_queues; i++) {
404                 priv->rx_queue[i]->rxic = mk_ic_value(
405                         cvals->rx_max_coalesced_frames,
406                         gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
407         }
408
409         /* Set up tx coalescing */
410         if ((cvals->tx_coalesce_usecs == 0) ||
411             (cvals->tx_max_coalesced_frames == 0)) {
412                 for (i = 0; i < priv->num_tx_queues; i++)
413                         priv->tx_queue[i]->txcoalescing = 0;
414         } else {
415                 for (i = 0; i < priv->num_tx_queues; i++)
416                         priv->tx_queue[i]->txcoalescing = 1;
417         }
418
419         /* Check the bounds of the values */
420         if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
421                 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
422                             GFAR_MAX_COAL_USECS);
423                 return -EINVAL;
424         }
425
426         if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
427                 netdev_info(dev, "Coalescing is limited to %d frames\n",
428                             GFAR_MAX_COAL_FRAMES);
429                 return -EINVAL;
430         }
431
432         for (i = 0; i < priv->num_tx_queues; i++) {
433                 priv->tx_queue[i]->txic = mk_ic_value(
434                         cvals->tx_max_coalesced_frames,
435                         gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
436         }
437
438         gfar_configure_coalescing_all(priv);
439
440         return 0;
441 }
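
/* The two hooks above back the standard "ethtool -c/-C" interface; for
 * example (hypothetical interface name):
 *   ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 100 tx-frames 32
 * enables coalescing, while passing 0 for either the usecs or frames
 * value of a direction disables coalescing for that direction.
 */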
442
443 /* Fills in rvals with the current ring parameters.  Currently,
444  * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
445  * jumbo are ignored by the driver */
446 static void gfar_gringparam(struct net_device *dev,
447                             struct ethtool_ringparam *rvals)
448 {
449         struct gfar_private *priv = netdev_priv(dev);
450         struct gfar_priv_tx_q *tx_queue = NULL;
451         struct gfar_priv_rx_q *rx_queue = NULL;
452
453         tx_queue = priv->tx_queue[0];
454         rx_queue = priv->rx_queue[0];
455
456         rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
457         rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
458         rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
459         rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
460
461         /* Values changeable by the user.  The valid values are
462          * in the range 1 to the "*_max_pending" counterpart above.
463          */
464         rvals->rx_pending = rx_queue->rx_ring_size;
465         rvals->rx_mini_pending = rx_queue->rx_ring_size;
466         rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
467         rvals->tx_pending = tx_queue->tx_ring_size;
468 }
469
470 /* Change the current ring parameters, stopping the controller if
471  * necessary so that we don't mess things up while we're in
472  * motion.  We wait for the ring to be clean before reallocating
473  * the rings.
474  */
475 static int gfar_sringparam(struct net_device *dev,
476                            struct ethtool_ringparam *rvals)
477 {
478         struct gfar_private *priv = netdev_priv(dev);
479         int err = 0, i = 0;
480
481         if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
482                 return -EINVAL;
483
484         if (!is_power_of_2(rvals->rx_pending)) {
485                 netdev_err(dev, "Ring sizes must be a power of 2\n");
486                 return -EINVAL;
487         }
488
489         if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
490                 return -EINVAL;
491
492         if (!is_power_of_2(rvals->tx_pending)) {
493                 netdev_err(dev, "Ring sizes must be a power of 2\n");
494                 return -EINVAL;
495         }
496
497
498         if (dev->flags & IFF_UP) {
499                 unsigned long flags;
500
501                 /* Halt TX and RX, and process the frames which
502                  * have already been received
503                  */
504                 local_irq_save(flags);
505                 lock_tx_qs(priv);
506                 lock_rx_qs(priv);
507
508                 gfar_halt(dev);
509
510                 unlock_rx_qs(priv);
511                 unlock_tx_qs(priv);
512                 local_irq_restore(flags);
513
514                 for (i = 0; i < priv->num_rx_queues; i++)
515                         gfar_clean_rx_ring(priv->rx_queue[i],
516                                            priv->rx_queue[i]->rx_ring_size);
517
518                 /* Now we take down the rings to rebuild them */
519                 stop_gfar(dev);
520         }
521
522         /* Change the size */
523         for (i = 0; i < priv->num_rx_queues; i++) {
524                 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
525                 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
526                 priv->tx_queue[i]->num_txbdfree =
527                         priv->tx_queue[i]->tx_ring_size;
528         }
529
530         /* Rebuild the rings with the new size */
531         if (dev->flags & IFF_UP) {
532                 err = startup_gfar(dev);
533                 netif_tx_wake_all_queues(dev);
534         }
535         return err;
536 }
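
/* The ring hooks above back "ethtool -g/-G", e.g. (hypothetical
 * interface name):
 *   ethtool -G eth0 rx 256 tx 256
 * Requested sizes must be powers of two and no larger than the driver
 * maximums checked above.
 */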
537
538 static void gfar_gpauseparam(struct net_device *dev,
539                              struct ethtool_pauseparam *epause)
540 {
541         struct gfar_private *priv = netdev_priv(dev);
542
543         epause->autoneg = !!priv->pause_aneg_en;
544         epause->rx_pause = !!priv->rx_pause_en;
545         epause->tx_pause = !!priv->tx_pause_en;
546 }
547
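/* Update pause (flow control) settings: when autonegotiation is enabled
 * the new pause advertisement is pushed to the PHY, otherwise MACCFG1 is
 * programmed directly.
 */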
548 static int gfar_spauseparam(struct net_device *dev,
549                             struct ethtool_pauseparam *epause)
550 {
551         struct gfar_private *priv = netdev_priv(dev);
552         struct phy_device *phydev = priv->phydev;
553         struct gfar __iomem *regs = priv->gfargrp[0].regs;
554         u32 oldadv, newadv;
555
556         if (!(phydev->supported & SUPPORTED_Pause) ||
557             (!(phydev->supported & SUPPORTED_Asym_Pause) &&
558              (epause->rx_pause != epause->tx_pause)))
559                 return -EINVAL;
560
561         priv->rx_pause_en = priv->tx_pause_en = 0;
562         if (epause->rx_pause) {
563                 priv->rx_pause_en = 1;
564
565                 if (epause->tx_pause) {
566                         priv->tx_pause_en = 1;
567                         /* FLOW_CTRL_RX & TX */
568                         newadv = ADVERTISED_Pause;
569                 } else  /* FLOW_CTLR_RX */
570                         newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
571         } else if (epause->tx_pause) {
572                 priv->tx_pause_en = 1;
573                 /* FLOW_CTLR_TX */
574                 newadv = ADVERTISED_Asym_Pause;
575         } else
576                 newadv = 0;
577
578         if (epause->autoneg)
579                 priv->pause_aneg_en = 1;
580         else
581                 priv->pause_aneg_en = 0;
582
583         oldadv = phydev->advertising &
584                 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
585         if (oldadv != newadv) {
586                 phydev->advertising &=
587                         ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
588                 phydev->advertising |= newadv;
589                 if (phydev->autoneg)
590                         /* inform link partner of our
591                          * new flow ctrl settings
592                          */
593                         return phy_start_aneg(phydev);
594
595                 if (!epause->autoneg) {
596                         u32 tempval;
597                         tempval = gfar_read(&regs->maccfg1);
598                         tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
599                         if (priv->tx_pause_en)
600                                 tempval |= MACCFG1_TX_FLOW;
601                         if (priv->rx_pause_en)
602                                 tempval |= MACCFG1_RX_FLOW;
603                         gfar_write(&regs->maccfg1, tempval);
604                 }
605         }
606
607         return 0;
608 }
609
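/* Apply netdev feature changes.  VLAN offload toggles are handled by
 * gfar_vlan_mode(); changing RX checksum offload requires stopping and
 * restarting the controller.
 */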
610 int gfar_set_features(struct net_device *dev, netdev_features_t features)
611 {
612         struct gfar_private *priv = netdev_priv(dev);
613         unsigned long flags;
614         int err = 0, i = 0;
615         netdev_features_t changed = dev->features ^ features;
616
617         if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
618                 gfar_vlan_mode(dev, features);
619
620         if (!(changed & NETIF_F_RXCSUM))
621                 return 0;
622
623         if (dev->flags & IFF_UP) {
624                 /* Halt TX and RX, and process the frames which
625                  * have already been received
626                  */
627                 local_irq_save(flags);
628                 lock_tx_qs(priv);
629                 lock_rx_qs(priv);
630
631                 gfar_halt(dev);
632
633                 unlock_tx_qs(priv);
634                 unlock_rx_qs(priv);
635                 local_irq_restore(flags);
636
637                 for (i = 0; i < priv->num_rx_queues; i++)
638                         gfar_clean_rx_ring(priv->rx_queue[i],
639                                            priv->rx_queue[i]->rx_ring_size);
640
641                 /* Now we take down the rings to rebuild them */
642                 stop_gfar(dev);
643
644                 dev->features = features;
645
646                 err = startup_gfar(dev);
647                 netif_tx_wake_all_queues(dev);
648         }
649         return err;
650 }
651
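/* Driver message level (netif_msg_* bitmap), exposed via
 * "ethtool -s <iface> msglvl <value>".
 */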
652 static uint32_t gfar_get_msglevel(struct net_device *dev)
653 {
654         struct gfar_private *priv = netdev_priv(dev);
655
656         return priv->msg_enable;
657 }
658
659 static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
660 {
661         struct gfar_private *priv = netdev_priv(dev);
662
663         priv->msg_enable = data;
664 }
665
666 #ifdef CONFIG_PM
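/* Wake-on-LAN: only magic-packet wake is supported, and only when the
 * device advertises FSL_GIANFAR_DEV_HAS_MAGIC_PACKET ("ethtool -s <iface>
 * wol g" from user space).
 */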
667 static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
668 {
669         struct gfar_private *priv = netdev_priv(dev);
670
671         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
672                 wol->supported = WAKE_MAGIC;
673                 wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
674         } else {
675                 wol->supported = wol->wolopts = 0;
676         }
677 }
678
679 static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
680 {
681         struct gfar_private *priv = netdev_priv(dev);
682         unsigned long flags;
683
684         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
685             wol->wolopts != 0)
686                 return -EINVAL;
687
688         if (wol->wolopts & ~WAKE_MAGIC)
689                 return -EINVAL;
690
691         device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
692
693         spin_lock_irqsave(&priv->bflock, flags);
694         priv->wol_en =  !!device_may_wakeup(&dev->dev);
695         spin_unlock_irqrestore(&priv->bflock, flags);
696
697         return 0;
698 }
699 #endif
700
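/* Program filer hash rules for each requested RXH_* flow-hash field (two
 * entries for the L2 destination address), feeding each field into hash
 * table 0 and walking cur_filer_idx backwards.  Reached via the
 * rx-flow-hash ethtool path, e.g. "ethtool -N <iface> rx-flow-hash tcp4
 * sdfn" (hypothetical invocation).
 */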
701 static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
702 {
703         u32 fcr = 0x0, fpr = FPR_FILER_MASK;
704
705         if (ethflow & RXH_L2DA) {
706                 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
707                       RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
708                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
709                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
710                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
711                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
712
713                 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
714                       RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
715                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
716                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
717                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
718                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
719         }
720
721         if (ethflow & RXH_VLAN) {
722                 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
723                       RQFCR_AND | RQFCR_HASHTBL_0;
724                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
725                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
726                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
727                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
728         }
729
730         if (ethflow & RXH_IP_SRC) {
731                 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
732                       RQFCR_AND | RQFCR_HASHTBL_0;
733                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
734                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
735                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
736                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
737         }
738
739         if (ethflow & (RXH_IP_DST)) {
740                 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
741                       RQFCR_AND | RQFCR_HASHTBL_0;
742                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
743                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
744                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
745                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
746         }
747
748         if (ethflow & RXH_L3_PROTO) {
749                 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
750                       RQFCR_AND | RQFCR_HASHTBL_0;
751                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
752                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
753                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
754                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
755         }
756
757         if (ethflow & RXH_L4_B_0_1) {
758                 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
759                       RQFCR_AND | RQFCR_HASHTBL_0;
760                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
761                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
762                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
763                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
764         }
765
766         if (ethflow & RXH_L4_B_2_3) {
767                 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
768                       RQFCR_AND | RQFCR_HASHTBL_0;
769                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
770                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
771                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
772                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
773         }
774 }
775
776 static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
777                                        u64 class)
778 {
779         unsigned int last_rule_idx = priv->cur_filer_idx;
780         unsigned int cmp_rqfpr;
781         unsigned int *local_rqfpr;
782         unsigned int *local_rqfcr;
783         int i = 0x0, k = 0x0;
784         int j = MAX_FILER_IDX, l = 0x0;
785         int ret = 1;
786
787         local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
788                                     GFP_KERNEL);
789         local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
790                                     GFP_KERNEL);
791         if (!local_rqfpr || !local_rqfcr) {
792                 ret = 0;
793                 goto err;
794         }
795
796         switch (class) {
797         case TCP_V4_FLOW:
798                 cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
799                 break;
800         case UDP_V4_FLOW:
801                 cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
802                 break;
803         case TCP_V6_FLOW:
804                 cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
805                 break;
806         case UDP_V6_FLOW:
807                 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
808                 break;
809         default:
810                 netdev_err(priv->ndev,
811                            "Right now this class is not supported\n");
812                 ret = 0;
813                 goto err;
814         }
815
816         for (i = 0; i < MAX_FILER_IDX + 1; i++) {
817                 local_rqfpr[j] = priv->ftp_rqfpr[i];
818                 local_rqfcr[j] = priv->ftp_rqfcr[i];
819                 j--;
820                 if ((priv->ftp_rqfcr[i] ==
821                      (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
822                     (priv->ftp_rqfpr[i] == cmp_rqfpr))
823                         break;
824         }
825
826         if (i == MAX_FILER_IDX + 1) {
827                 netdev_err(priv->ndev,
828                            "No parse rule found, can't create hash rules\n");
829                 ret = 0;
830                 goto err;
831         }
832
833         /* If a match was found, it marks the start of a cluster rule;
834          * if it was already programmed, we need to overwrite these rules
835          */
836         for (l = i+1; l < MAX_FILER_IDX; l++) {
837                 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
838                     !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
839                         priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
840                                              RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
841                         priv->ftp_rqfpr[l] = FPR_FILER_MASK;
842                         gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
843                                          priv->ftp_rqfpr[l]);
844                         break;
845                 }
846
847                 if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
848                         (priv->ftp_rqfcr[l] & RQFCR_AND))
849                         continue;
850                 else {
851                         local_rqfpr[j] = priv->ftp_rqfpr[l];
852                         local_rqfcr[j] = priv->ftp_rqfcr[l];
853                         j--;
854                 }
855         }
856
857         priv->cur_filer_idx = l - 1;
858         last_rule_idx = l;
859
860         /* hash rules */
861         ethflow_to_filer_rules(priv, ethflow);
862
863         /* Write back the popped out rules again */
864         for (k = j+1; k < MAX_FILER_IDX; k++) {
865                 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
866                 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
867                 gfar_write_filer(priv, priv->cur_filer_idx,
868                                  local_rqfcr[k], local_rqfpr[k]);
869                 if (!priv->cur_filer_idx)
870                         break;
871                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
872         }
873
874 err:
875         kfree(local_rqfcr);
876         kfree(local_rqfpr);
877         return ret;
878 }
879
880 static int gfar_set_hash_opts(struct gfar_private *priv,
881                               struct ethtool_rxnfc *cmd)
882 {
883         /* write the filer rules here */
884         if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
885                 return -EINVAL;
886
887         return 0;
888 }
889
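/* Verify that the receive parser/filer is actually enabled (in FIFO or
 * standard mode) before accepting steering rules, and point the
 * arbitrary-extraction register at the first four Layer 4 bytes.
 */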
890 static int gfar_check_filer_hardware(struct gfar_private *priv)
891 {
892         struct gfar __iomem *regs = NULL;
893         u32 i;
894
895         regs = priv->gfargrp[0].regs;
896
897         /* Check if we are in FIFO mode */
898         i = gfar_read(&regs->ecntrl);
899         i &= ECNTRL_FIFM;
900         if (i == ECNTRL_FIFM) {
901                 netdev_notice(priv->ndev, "Interface in FIFO mode\n");
902                 i = gfar_read(&regs->rctrl);
903                 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
904                 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
905                         netdev_info(priv->ndev,
906                                     "Receive Queue Filtering enabled\n");
907                 } else {
908                         netdev_warn(priv->ndev,
909                                     "Receive Queue Filtering disabled\n");
910                         return -EOPNOTSUPP;
911                 }
912         }
913         /* Or in standard mode */
914         else {
915                 i = gfar_read(&regs->rctrl);
916                 i &= RCTRL_PRSDEP_MASK;
917                 if (i == RCTRL_PRSDEP_MASK) {
918                         netdev_info(priv->ndev,
919                                     "Receive Queue Filtering enabled\n");
920                 } else {
921                         netdev_warn(priv->ndev,
922                                     "Receive Queue Filtering disabled\n");
923                         return -EOPNOTSUPP;
924                 }
925         }
926
927         /* Sets the properties for the arbitrary filer rule
928          * to the first 4 Layer 4 bytes
929          */
930         regs->rbifx = 0xC0C1C2C3;
931         return 0;
932 }
933
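/* sort() helpers: compare two mask-table entries by their leading 32-bit
 * word (the mask value), and swap entries four 32-bit words at a time.
 */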
934 static int gfar_comp_asc(const void *a, const void *b)
935 {
936         return memcmp(a, b, 4);
937 }
938
939 static int gfar_comp_desc(const void *a, const void *b)
940 {
941         return -memcmp(a, b, 4);
942 }
943
944 static void gfar_swap(void *a, void *b, int size)
945 {
946         u32 *_a = a;
947         u32 *_b = b;
948
949         swap(_a[0], _b[0]);
950         swap(_a[1], _b[1]);
951         swap(_a[2], _b[2]);
952         swap(_a[3], _b[3]);
953 }
954
955 /* Write a mask to filer cache */
956 static void gfar_set_mask(u32 mask, struct filer_table *tab)
957 {
958         tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
959         tab->fe[tab->index].prop = mask;
960         tab->index++;
961 }
962
963 /* Sets parse bits (e.g. IP or TCP) */
964 static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
965 {
966         gfar_set_mask(mask, tab);
967         tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
968                                    RQFCR_AND;
969         tab->fe[tab->index].prop = value;
970         tab->index++;
971 }
972
973 static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
974                                        struct filer_table *tab)
975 {
976         gfar_set_mask(mask, tab);
977         tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
978         tab->fe[tab->index].prop = value;
979         tab->index++;
980 }
981
982 /* For setting a tuple of value and mask of type flag
983  * Example:
984  * IP-Src = 10.0.0.0/255.0.0.0
985  * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
986  *
987  * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
988  * for a don't-care mask it gives us 0.
989  *
990  * The don't-care check and the mask adjustment for mask=0 are done for VLAN
991  * and MAC fields on an upper level (due to missing information on this level).
992  * Those entries can be discarded when they have value=0 and mask=0.
993  *
994  * Furthermore, all masks are one-padded for better hardware efficiency.
995  */
996 static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
997                                struct filer_table *tab)
998 {
999         switch (flag) {
1000                 /* 3bit */
1001         case RQFCR_PID_PRI:
1002                 if (!(value | mask))
1003                         return;
1004                 mask |= RQFCR_PID_PRI_MASK;
1005                 break;
1006                 /* 8bit */
1007         case RQFCR_PID_L4P:
1008         case RQFCR_PID_TOS:
1009                 if (!~(mask | RQFCR_PID_L4P_MASK))
1010                         return;
1011                 if (!mask)
1012                         mask = ~0;
1013                 else
1014                         mask |= RQFCR_PID_L4P_MASK;
1015                 break;
1016                 /* 12bit */
1017         case RQFCR_PID_VID:
1018                 if (!(value | mask))
1019                         return;
1020                 mask |= RQFCR_PID_VID_MASK;
1021                 break;
1022                 /* 16bit */
1023         case RQFCR_PID_DPT:
1024         case RQFCR_PID_SPT:
1025         case RQFCR_PID_ETY:
1026                 if (!~(mask | RQFCR_PID_PORT_MASK))
1027                         return;
1028                 if (!mask)
1029                         mask = ~0;
1030                 else
1031                         mask |= RQFCR_PID_PORT_MASK;
1032                 break;
1033                 /* 24bit */
1034         case RQFCR_PID_DAH:
1035         case RQFCR_PID_DAL:
1036         case RQFCR_PID_SAH:
1037         case RQFCR_PID_SAL:
1038                 if (!(value | mask))
1039                         return;
1040                 mask |= RQFCR_PID_MAC_MASK;
1041                 break;
1042                 /* for all real 32bit masks */
1043         default:
1044                 if (!~mask)
1045                         return;
1046                 if (!mask)
1047                         mask = ~0;
1048                 break;
1049         }
1050         gfar_set_general_attribute(value, mask, flag, tab);
1051 }
1052
1053 /* Translates value and mask for UDP, TCP or SCTP */
1054 static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
1055                               struct ethtool_tcpip4_spec *mask,
1056                               struct filer_table *tab)
1057 {
1058         gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
1059         gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
1060         gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
1061         gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
1062         gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
1063 }
1064
1065 /* Translates value and mask for RAW-IP4 */
1066 static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
1067                              struct ethtool_usrip4_spec *mask,
1068                              struct filer_table *tab)
1069 {
1070         gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
1071         gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
1072         gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
1073         gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
1074         gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
1075                            tab);
1076
1077 }
1078
1079 /* Translates value and mask for ETHER spec */
1080 static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
1081                            struct filer_table *tab)
1082 {
1083         u32 upper_temp_mask = 0;
1084         u32 lower_temp_mask = 0;
1085
1086         /* Source address */
1087         if (!is_broadcast_ether_addr(mask->h_source)) {
1088                 if (is_zero_ether_addr(mask->h_source)) {
1089                         upper_temp_mask = 0xFFFFFFFF;
1090                         lower_temp_mask = 0xFFFFFFFF;
1091                 } else {
1092                         upper_temp_mask = mask->h_source[0] << 16 |
1093                                           mask->h_source[1] << 8  |
1094                                           mask->h_source[2];
1095                         lower_temp_mask = mask->h_source[3] << 16 |
1096                                           mask->h_source[4] << 8  |
1097                                           mask->h_source[5];
1098                 }
1099                 /* Upper 24bit */
1100                 gfar_set_attribute(value->h_source[0] << 16 |
1101                                    value->h_source[1] << 8  |
1102                                    value->h_source[2],
1103                                    upper_temp_mask, RQFCR_PID_SAH, tab);
1104                 /* And the same for the lower part */
1105                 gfar_set_attribute(value->h_source[3] << 16 |
1106                                    value->h_source[4] << 8  |
1107                                    value->h_source[5],
1108                                    lower_temp_mask, RQFCR_PID_SAL, tab);
1109         }
1110         /* Destination address */
1111         if (!is_broadcast_ether_addr(mask->h_dest)) {
1112                 /* Special case: destination is the limited broadcast address */
1113                 if ((is_broadcast_ether_addr(value->h_dest) &&
1114                     is_zero_ether_addr(mask->h_dest))) {
1115                         gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
1116                 } else {
1117                         if (is_zero_ether_addr(mask->h_dest)) {
1118                                 upper_temp_mask = 0xFFFFFFFF;
1119                                 lower_temp_mask = 0xFFFFFFFF;
1120                         } else {
1121                                 upper_temp_mask = mask->h_dest[0] << 16 |
1122                                                   mask->h_dest[1] << 8  |
1123                                                   mask->h_dest[2];
1124                                 lower_temp_mask = mask->h_dest[3] << 16 |
1125                                                   mask->h_dest[4] << 8  |
1126                                                   mask->h_dest[5];
1127                         }
1128
1129                         /* Upper 24bit */
1130                         gfar_set_attribute(value->h_dest[0] << 16 |
1131                                            value->h_dest[1] << 8  |
1132                                            value->h_dest[2],
1133                                            upper_temp_mask, RQFCR_PID_DAH, tab);
1134                         /* And the same for the lower part */
1135                         gfar_set_attribute(value->h_dest[3] << 16 |
1136                                            value->h_dest[4] << 8  |
1137                                            value->h_dest[5],
1138                                            lower_temp_mask, RQFCR_PID_DAL, tab);
1139                 }
1140         }
1141
1142         gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
1143 }
1144
1145 /* Convert a rule to binary filter format of gianfar */
1146 static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1147                                  struct filer_table *tab)
1148 {
1149         u32 vlan = 0, vlan_mask = 0;
1150         u32 id = 0, id_mask = 0;
1151         u32 cfi = 0, cfi_mask = 0;
1152         u32 prio = 0, prio_mask = 0;
1153         u32 old_index = tab->index;
1154
1155         /* Check if vlan is wanted */
1156         if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
1157                 if (!rule->m_ext.vlan_tci)
1158                         rule->m_ext.vlan_tci = 0xFFFF;
1159
1160                 vlan = RQFPR_VLN;
1161                 vlan_mask = RQFPR_VLN;
1162
1163                 /* Separate the fields */
1164                 id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
1165                 id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
1166                 cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
1167                 cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
1168                 prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
1169                        VLAN_PRIO_SHIFT;
1170                 prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
1171                             VLAN_PRIO_SHIFT;
1172
1173                 if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
1174                         vlan |= RQFPR_CFI;
1175                         vlan_mask |= RQFPR_CFI;
1176                 } else if (cfi != VLAN_TAG_PRESENT &&
1177                            cfi_mask == VLAN_TAG_PRESENT) {
1178                         vlan_mask |= RQFPR_CFI;
1179                 }
1180         }
1181
1182         switch (rule->flow_type & ~FLOW_EXT) {
1183         case TCP_V4_FLOW:
1184                 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
1185                                     RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
1186                 gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
1187                                   &rule->m_u.tcp_ip4_spec, tab);
1188                 break;
1189         case UDP_V4_FLOW:
1190                 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
1191                                     RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
1192                 gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
1193                                   &rule->m_u.udp_ip4_spec, tab);
1194                 break;
1195         case SCTP_V4_FLOW:
1196                 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1197                                     tab);
1198                 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
1199                 gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
1200                                   (struct ethtool_tcpip4_spec *)&rule->m_u,
1201                                   tab);
1202                 break;
1203         case IP_USER_FLOW:
1204                 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1205                                     tab);
1206                 gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
1207                                  (struct ethtool_usrip4_spec *) &rule->m_u,
1208                                  tab);
1209                 break;
1210         case ETHER_FLOW:
1211                 if (vlan)
1212                         gfar_set_parse_bits(vlan, vlan_mask, tab);
1213                 gfar_set_ether((struct ethhdr *) &rule->h_u,
1214                                (struct ethhdr *) &rule->m_u, tab);
1215                 break;
1216         default:
1217                 return -1;
1218         }
1219
1220         /* Set the vlan attributes in the end */
1221         if (vlan) {
1222                 gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
1223                 gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
1224         }
1225
1226         /* If there has been nothing written till now, it must be a default */
1227         if (tab->index == old_index) {
1228                 gfar_set_mask(0xFFFFFFFF, tab);
1229                 tab->fe[tab->index].ctrl = 0x20;
1230                 tab->fe[tab->index].prop = 0x0;
1231                 tab->index++;
1232         }
1233
1234         /* Remove last AND */
1235         tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
1236
1237         /* Specify which queue to use or to drop */
1238         if (rule->ring_cookie == RX_CLS_FLOW_DISC)
1239                 tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
1240         else
1241                 tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
1242
1243         /* Only big enough entries can be clustered */
1244         if (tab->index > (old_index + 2)) {
1245                 tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
1246                 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
1247         }
1248
1249         /* In rare cases the cache can be full while there is
1250          * free space in hw
1251          */
1252         if (tab->index > MAX_FILER_CACHE_IDX - 1)
1253                 return -EBUSY;
1254
1255         return 0;
1256 }
1257
1258 /* Copy size filer entries */
1259 static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
1260                                     struct gfar_filer_entry src[], s32 size)
1261 {
1262         while (size > 0) {
1263                 size--;
1264                 dst[size].ctrl = src[size].ctrl;
1265                 dst[size].prop = src[size].prop;
1266         }
1267 }
1268
1269 /* Delete the contents of the filer-table between start and end
1270  * and collapse them
1271  */
1272 static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1273 {
1274         int length;
1275
1276         if (end > MAX_FILER_CACHE_IDX || end < begin)
1277                 return -EINVAL;
1278
1279         end++;
1280         length = end - begin;
1281
1282         /* Copy */
1283         while (end < tab->index) {
1284                 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1285                 tab->fe[begin++].prop = tab->fe[end++].prop;
1286
1287         }
1288         /* Fill up with don't cares */
1289         while (begin < tab->index) {
1290                 tab->fe[begin].ctrl = 0x60;
1291                 tab->fe[begin].prop = 0xFFFFFFFF;
1292                 begin++;
1293         }
1294
1295         tab->index -= length;
1296         return 0;
1297 }
1298
1299 /* Make space on the wanted location */
1300 static int gfar_expand_filer_entries(u32 begin, u32 length,
1301                                      struct filer_table *tab)
1302 {
1303         if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1304             begin > MAX_FILER_CACHE_IDX)
1305                 return -EINVAL;
1306
1307         gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1308                                 tab->index - length + 1);
1309
1310         tab->index += length;
1311         return 0;
1312 }
1313
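/* Find the next filer-cache entry that opens a cluster (AND and CLE set) */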
1314 static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1315 {
1316         for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1317              start++) {
1318                 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1319                     (RQFCR_AND | RQFCR_CLE))
1320                         return start;
1321         }
1322         return -1;
1323 }
1324
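/* Find the next filer-cache entry that closes a cluster (CLE set, AND clear) */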
1325 static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1326 {
1327         for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1328              start++) {
1329                 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1330                     (RQFCR_CLE))
1331                         return start;
1332         }
1333         return -1;
1334 }
1335
1336 /* Uses the hardware's clustering option to reduce
1337  * the number of filer table entries
1338  */
1339 static void gfar_cluster_filer(struct filer_table *tab)
1340 {
1341         s32 i = -1, j, iend, jend;
1342
1343         while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1344                 j = i;
1345                 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1346                         /* The cluster entries themselves and the previous
1347                          * ones (their masks) must be identical!
1348                          */
1349                         if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1350                                 break;
1351                         if (tab->fe[i].prop != tab->fe[j].prop)
1352                                 break;
1353                         if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1354                                 break;
1355                         if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1356                                 break;
1357                         iend = gfar_get_next_cluster_end(i, tab);
1358                         jend = gfar_get_next_cluster_end(j, tab);
1359                         if (jend == -1 || iend == -1)
1360                                 break;
1361
1362                         /* First we make some free space, where our cluster
1363                          * element should be. Then we copy it there and finally
1364                          * delete it from its old location.
1365                          */
1366                         if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1367                             -EINVAL)
1368                                 break;
1369
1370                         gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1371                                                 &(tab->fe[jend + 1]), jend - j);
1372
1373                         if (gfar_trim_filer_entries(jend - 1,
1374                                                     jend + (jend - j),
1375                                                     tab) == -EINVAL)
1376                                 return;
1377
1378                         /* Mask out cluster bit */
1379                         tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1380                 }
1381         }
1382 }
1383
1384 /* Swaps the masked bits of a1<>a2 and b1<>b2 */
1385 static void gfar_swap_bits(struct gfar_filer_entry *a1,
1386                            struct gfar_filer_entry *a2,
1387                            struct gfar_filer_entry *b1,
1388                            struct gfar_filer_entry *b2, u32 mask)
1389 {
1390         u32 temp[4];
1391         temp[0] = a1->ctrl & mask;
1392         temp[1] = a2->ctrl & mask;
1393         temp[2] = b1->ctrl & mask;
1394         temp[3] = b2->ctrl & mask;
1395
1396         a1->ctrl &= ~mask;
1397         a2->ctrl &= ~mask;
1398         b1->ctrl &= ~mask;
1399         b2->ctrl &= ~mask;
1400
1401         a1->ctrl |= temp[1];
1402         a2->ctrl |= temp[0];
1403         b1->ctrl |= temp[3];
1404         b2->ctrl |= temp[2];
1405 }
1406
1407 /* Generate a list in mask_table consisting of mask values with their
1408  * start and end of validity, and a block index marking parts that
1409  * belong together (glued by ANDs)
1410  */
1411 static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1412                                     struct filer_table *tab)
1413 {
1414         u32 i, and_index = 0, block_index = 1;
1415
1416         for (i = 0; i < tab->index; i++) {
1417
1418                 /* LSByte of control = 0 sets a mask */
1419                 if (!(tab->fe[i].ctrl & 0xF)) {
1420                         mask_table[and_index].mask = tab->fe[i].prop;
1421                         mask_table[and_index].start = i;
1422                         mask_table[and_index].block = block_index;
1423                         if (and_index >= 1)
1424                                 mask_table[and_index - 1].end = i - 1;
1425                         and_index++;
1426                 }
1427                 /* Cluster starts and ends get their own blocks because
1428                  * they must hold their position
1429                  */
1430                 if (tab->fe[i].ctrl & RQFCR_CLE)
1431                         block_index++;
1432                 /* A cleared AND bit indicates the end of a dependent block */
1433                 if (!(tab->fe[i].ctrl & RQFCR_AND))
1434                         block_index++;
1435         }
1436
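             /* Close the range of the last mask entry */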
1437         mask_table[and_index - 1].end = i - 1;
1438
1439         return and_index;
1440 }
1441
1442 /* Sorts the entries of mask_table by the values of the masks.
1443  * Important: The 0xFF80 flags of the first and last entry of a
1444  * block must hold their position (which queue, CLusterEnable, ReJEct,
1445  * AND)
1446  */
1447 static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1448                                  struct filer_table *temp_table, u32 and_index)
1449 {
1450         /* Pointer to compare function (_asc or _desc) */
1451         int (*gfar_comp)(const void *, const void *);
1452
1453         u32 i, size = 0, start = 0, prev = 1;
1454         u32 old_first, old_last, new_first, new_last;
1455
1456         gfar_comp = &gfar_comp_desc;
1457
1458         for (i = 0; i < and_index; i++) {
1459                 if (prev != mask_table[i].block) {
1460                         old_first = mask_table[start].start + 1;
1461                         old_last = mask_table[i - 1].end;
1462                         sort(mask_table + start, size,
1463                              sizeof(struct gfar_mask_entry),
1464                              gfar_comp, &gfar_swap);
1465
1466                         /* Toggle the sort order for every block; this
1467                          * lets duplicate boundary masks be dropped later
1468                          */
1469                         if (gfar_comp == gfar_comp_desc)
1470                                 gfar_comp = &gfar_comp_asc;
1471                         else
1472                                 gfar_comp = &gfar_comp_desc;
1473
1474                         new_first = mask_table[start].start + 1;
1475                         new_last = mask_table[i - 1].end;
1476
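                             /* Sorting may have moved the entries carrying the
                              * block's positional control flags (queue, CLE,
                              * RJE, AND); swap those bits back so they stay at
                              * the block boundaries.
                              */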
1477                         gfar_swap_bits(&temp_table->fe[new_first],
1478                                        &temp_table->fe[old_first],
1479                                        &temp_table->fe[new_last],
1480                                        &temp_table->fe[old_last],
1481                                        RQFCR_QUEUE | RQFCR_CLE |
1482                                        RQFCR_RJE | RQFCR_AND);
1483
1484                         start = i;
1485                         size = 0;
1486                 }
1487                 size++;
1488                 prev = mask_table[i].block;
1489         }
1490 }
1491
1492 /* Reduces the number of masks needed in the filer table to save entries.
1493  * This is done by sorting the masks within each dependent block. A dependent
1494  * block is one held together by chained ANDs or by CLE. The sort order
1495  * toggles after every block. Of course the entries in scope of a mask must
1496  * move with it.
1497  */
1498 static int gfar_optimize_filer_masks(struct filer_table *tab)
1499 {
1500         struct filer_table *temp_table;
1501         struct gfar_mask_entry *mask_table;
1502
1503         u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1504         s32 ret = 0;
1505
1506         /* We need a copy of the filer table because
1507          * we want to change its order
1508          */
1509         temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1510         if (temp_table == NULL)
1511                 return -ENOMEM;
1512
1513         mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1514                              sizeof(struct gfar_mask_entry), GFP_KERNEL);
1515
1516         if (mask_table == NULL) {
1517                 ret = -ENOMEM;
1518                 goto end;
1519         }
1520
1521         and_index = gfar_generate_mask_table(mask_table, tab);
1522
1523         gfar_sort_mask_table(mask_table, temp_table, and_index);
1524
1525         /* Now we can copy the data from our duplicated filer table to
1526          * the real one in the order the mask table says
1527          */
1528         for (i = 0; i < and_index; i++) {
1529                 size = mask_table[i].end - mask_table[i].start + 1;
1530                 gfar_copy_filer_entries(&(tab->fe[j]),
1531                                 &(temp_table->fe[mask_table[i].start]), size);
1532                 j += size;
1533         }
1534
1535         /* Finally, check for duplicated masks (ctrl == 0x80, i.e. only
1536          * RQFCR_AND set) and drop the later one of each identical pair
1537          */
1538         for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1539                 if (tab->fe[i].ctrl == 0x80) {
1540                         previous_mask = i++;
1541                         break;
1542                 }
1543         }
1544         for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1545                 if (tab->fe[i].ctrl == 0x80) {
1546                         if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1547                                 /* Two identical ones found!
1548                                  * So drop the second one!
1549                                  */
1550                                 gfar_trim_filer_entries(i, i, tab);
1551                         } else
1552                                 /* Not identical! */
1553                                 previous_mask = i;
1554                 }
1555         }
1556
1557         kfree(mask_table);
1558 end:    kfree(temp_table);
1559         return ret;
1560 }
1561
1562 /* Write the bit-pattern from software's buffer to hardware registers */
1563 static int gfar_write_filer_table(struct gfar_private *priv,
1564                                   struct filer_table *tab)
1565 {
1566         u32 i = 0;
1567         if (tab->index > MAX_FILER_IDX - 1)
1568                 return -EBUSY;
1569
1570         /* Prevent an inconsistent filer table from being processed */
1571         lock_rx_qs(priv);
1572
1573         /* Fill regular entries */
1574         for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
1575              i++)
1576                 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1577         /* Fill the rest with fall-throughs */
1578         for (; i < MAX_FILER_IDX - 1; i++)
1579                 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1580         /* Last entry must be default accept
1581          * because that's what people expect
1582          */
1583         gfar_write_filer(priv, i, 0x20, 0x0);
1584
1585         unlock_rx_qs(priv);
1586
1587         return 0;
1588 }
1589
1590 static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
1591                                  struct gfar_private *priv)
1592 {
1593
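             /* The masks were already inverted by gfar_invert_masks(), so an
              * all-ones mask now means "don't care"; anything else means the
              * user asked to match a field this hardware cannot filter on.
              */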
1594         if (flow->flow_type & FLOW_EXT) {
1595                 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1596                         netdev_warn(priv->ndev,
1597                                     "User-specific data not supported!\n");
1598                 if (~flow->m_ext.vlan_etype & 0xFFFF)
1599                         netdev_warn(priv->ndev,
1600                                     "VLAN-etype not supported!\n");
1601         }
1602         if (flow->flow_type == IP_USER_FLOW)
1603                 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1604                         netdev_warn(priv->ndev,
1605                                     "IP-Version differing from IPv4 not supported!\n");
1606
1607         return 0;
1608 }
1609
1610 static int gfar_process_filer_changes(struct gfar_private *priv)
1611 {
1612         struct ethtool_flow_spec_container *j;
1613         struct filer_table *tab;
1614         s32 i = 0;
1615         s32 ret = 0;
1616
1617         /* kzalloc zeroes the table, so tab->index starts at zero, too */
1618         tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1619         if (tab == NULL)
1620                 return -ENOMEM;
1621
1622         /* Now convert the existing filer data from flow_spec into
1623          * filer tables binary format
1624          */
1625         list_for_each_entry(j, &priv->rx_list.list, list) {
1626                 ret = gfar_convert_to_filer(&j->fs, tab);
1627                 if (ret == -EBUSY) {
1628                         netdev_err(priv->ndev,
1629                                    "Rule not added: No free space!\n");
1630                         goto end;
1631                 }
1632                 if (ret == -1) {
1633                         netdev_err(priv->ndev,
1634                                    "Rule not added: Unsupported Flow-type!\n");
1635                         goto end;
1636                 }
1637         }
1638
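             /* Remember the entry count before optimization so the compression
              * rate can be reported below
              */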
1639         i = tab->index;
1640
1641         /* Optimizations to save entries */
1642         gfar_cluster_filer(tab);
1643         gfar_optimize_filer_masks(tab);
1644
1645         pr_debug("\tSummary:\n"
1646                  "\tData on hardware: %d\n"
1647                  "\tCompression rate: %d%%\n",
1648                  tab->index, 100 - (100 * tab->index) / i);
1649
1650         /* Write everything to hardware */
1651         ret = gfar_write_filer_table(priv, tab);
1652         if (ret == -EBUSY) {
1653                 netdev_err(priv->ndev, "Rule not added: No free space!\n");
1654                 goto end;
1655         }
1656
1657 end:
1658         kfree(tab);
1659         return ret;
1660 }
1661
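     /* Flip every bit of the ethtool masks; the filer code works with inverted
      * masks and gfar_get_cls() inverts them back before reporting a rule to
      * user space.
      */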
1662 static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1663 {
1664         u32 i = 0;
1665
1666         for (i = 0; i < sizeof(flow->m_u); i++)
1667                 flow->m_u.hdata[i] ^= 0xFF;
1668
1669         flow->m_ext.vlan_etype ^= 0xFFFF;
1670         flow->m_ext.vlan_tci ^= 0xFFFF;
1671         flow->m_ext.data[0] ^= ~0;
1672         flow->m_ext.data[1] ^= ~0;
1673 }
1674
1675 static int gfar_add_cls(struct gfar_private *priv,
1676                         struct ethtool_rx_flow_spec *flow)
1677 {
1678         struct ethtool_flow_spec_container *temp, *comp;
1679         int ret = 0;
1680
1681         temp = kmalloc(sizeof(*temp), GFP_KERNEL);
1682         if (temp == NULL)
1683                 return -ENOMEM;
1684         memcpy(&temp->fs, flow, sizeof(temp->fs));
1685
1686         gfar_invert_masks(&temp->fs);
1687         ret = gfar_check_capability(&temp->fs, priv);
1688         if (ret)
1689                 goto clean_mem;
1690         /* Link the new element in at the position given by its location */
1691         if (list_empty(&priv->rx_list.list)) {
1692                 ret = gfar_check_filer_hardware(priv);
1693                 if (ret != 0)
1694                         goto clean_mem;
1695                 list_add(&temp->list, &priv->rx_list.list);
1696                 goto process;
1697         } else {
1698                 list_for_each_entry(comp, &priv->rx_list.list, list) {
1699                         if (comp->fs.location > flow->location) {
1700                                 list_add_tail(&temp->list, &comp->list);
1701                                 goto process;
1702                         }
1703                         if (comp->fs.location == flow->location) {
1704                                 netdev_err(priv->ndev,
1705                                            "Rule not added: ID %d not free!\n",
1706                                            flow->location);
1707                                 ret = -EBUSY;
1708                                 goto clean_mem;
1709                         }
1710                 }
1711                 list_add_tail(&temp->list, &priv->rx_list.list);
1712         }
1713
1714 process:
1715         ret = gfar_process_filer_changes(priv);
1716         if (ret)
1717                 goto clean_list;
1718         priv->rx_list.count++;
1719         return ret;
1720
1721 clean_list:
1722         list_del(&temp->list);
1723 clean_mem:
1724         kfree(temp);
1725         return ret;
1726 }
1727
1728 static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1729 {
1730         struct ethtool_flow_spec_container *comp;
1731         int ret = -EINVAL;
1732
1733         if (list_empty(&priv->rx_list.list))
1734                 return ret;
1735
1736         list_for_each_entry(comp, &priv->rx_list.list, list) {
1737                 if (comp->fs.location == loc) {
1738                         list_del(&comp->list);
1739                         kfree(comp);
1740                         priv->rx_list.count--;
1741                         gfar_process_filer_changes(priv);
1742                         ret = 0;
1743                         break;
1744                 }
1745         }
1746
1747         return ret;
1748 }
1749
1750 static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1751 {
1752         struct ethtool_flow_spec_container *comp;
1753         int ret = -EINVAL;
1754
1755         list_for_each_entry(comp, &priv->rx_list.list, list) {
1756                 if (comp->fs.location == cmd->fs.location) {
1757                         memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
1758                         gfar_invert_masks(&cmd->fs);
1759                         ret = 0;
1760                         break;
1761                 }
1762         }
1763
1764         return ret;
1765 }
1766
1767 static int gfar_get_cls_all(struct gfar_private *priv,
1768                             struct ethtool_rxnfc *cmd, u32 *rule_locs)
1769 {
1770         struct ethtool_flow_spec_container *comp;
1771         u32 i = 0;
1772
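             /* Report the location of every installed rule, bounded by the
              * rule count the caller provided
              */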
1773         list_for_each_entry(comp, &priv->rx_list.list, list) {
1774                 if (i == cmd->rule_cnt)
1775                         return -EMSGSIZE;
1776                 rule_locs[i] = comp->fs.location;
1777                 i++;
1778         }
1779
1780         cmd->data = MAX_FILER_IDX;
1781         cmd->rule_cnt = i;
1782
1783         return 0;
1784 }
1785
1786 static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1787 {
1788         struct gfar_private *priv = netdev_priv(dev);
1789         int ret = 0;
1790
1791         mutex_lock(&priv->rx_queue_access);
1792
1793         switch (cmd->cmd) {
1794         case ETHTOOL_SRXFH:
1795                 ret = gfar_set_hash_opts(priv, cmd);
1796                 break;
1797         case ETHTOOL_SRXCLSRLINS:
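                     /* Reject rules that target a nonexistent RX queue or an
                      * out-of-range rule location
                      */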
1798                 if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
1799                      cmd->fs.ring_cookie >= priv->num_rx_queues) ||
1800                     cmd->fs.location >= MAX_FILER_IDX) {
1801                         ret = -EINVAL;
1802                         break;
1803                 }
1804                 ret = gfar_add_cls(priv, &cmd->fs);
1805                 break;
1806         case ETHTOOL_SRXCLSRLDEL:
1807                 ret = gfar_del_cls(priv, cmd->fs.location);
1808                 break;
1809         default:
1810                 ret = -EINVAL;
1811         }
1812
1813         mutex_unlock(&priv->rx_queue_access);
1814
1815         return ret;
1816 }
1817
1818 static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1819                         u32 *rule_locs)
1820 {
1821         struct gfar_private *priv = netdev_priv(dev);
1822         int ret = 0;
1823
1824         switch (cmd->cmd) {
1825         case ETHTOOL_GRXRINGS:
1826                 cmd->data = priv->num_rx_queues;
1827                 break;
1828         case ETHTOOL_GRXCLSRLCNT:
1829                 cmd->rule_cnt = priv->rx_list.count;
1830                 break;
1831         case ETHTOOL_GRXCLSRULE:
1832                 ret = gfar_get_cls(priv, cmd);
1833                 break;
1834         case ETHTOOL_GRXCLSRLALL:
1835                 ret = gfar_get_cls_all(priv, cmd, rule_locs);
1836                 break;
1837         default:
1838                 ret = -EINVAL;
1839                 break;
1840         }
1841
1842         return ret;
1843 }
1844
1845 int gfar_phc_index = -1;
1846 EXPORT_SYMBOL(gfar_phc_index);
1847
1848 static int gfar_get_ts_info(struct net_device *dev,
1849                             struct ethtool_ts_info *info)
1850 {
1851         struct gfar_private *priv = netdev_priv(dev);
1852
1853         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
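                     /* No hardware time stamping available; report software
                      * timestamping only
                      */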
1854                 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1855                                         SOF_TIMESTAMPING_SOFTWARE;
1856                 info->phc_index = -1;
1857                 return 0;
1858         }
1859         info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1860                                 SOF_TIMESTAMPING_RX_HARDWARE |
1861                                 SOF_TIMESTAMPING_RAW_HARDWARE;
1862         info->phc_index = gfar_phc_index;
1863         info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1864                          (1 << HWTSTAMP_TX_ON);
1865         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1866                            (1 << HWTSTAMP_FILTER_ALL);
1867         return 0;
1868 }
1869
1870 const struct ethtool_ops gfar_ethtool_ops = {
1871         .get_settings = gfar_gsettings,
1872         .set_settings = gfar_ssettings,
1873         .get_drvinfo = gfar_gdrvinfo,
1874         .get_regs_len = gfar_reglen,
1875         .get_regs = gfar_get_regs,
1876         .get_link = ethtool_op_get_link,
1877         .get_coalesce = gfar_gcoalesce,
1878         .set_coalesce = gfar_scoalesce,
1879         .get_ringparam = gfar_gringparam,
1880         .set_ringparam = gfar_sringparam,
1881         .get_pauseparam = gfar_gpauseparam,
1882         .set_pauseparam = gfar_spauseparam,
1883         .get_strings = gfar_gstrings,
1884         .get_sset_count = gfar_sset_count,
1885         .get_ethtool_stats = gfar_fill_stats,
1886         .get_msglevel = gfar_get_msglevel,
1887         .set_msglevel = gfar_set_msglevel,
1888 #ifdef CONFIG_PM
1889         .get_wol = gfar_get_wol,
1890         .set_wol = gfar_set_wol,
1891 #endif
1892         .set_rxnfc = gfar_set_nfc,
1893         .get_rxnfc = gfar_get_nfc,
1894         .get_ts_info = gfar_get_ts_info,
1895 };