/*
 *  drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 *  Gianfar Ethernet Driver
 *  Ethtool support for Gianfar Enet
 *  Based on e1000 ethtool support
 *
 *  Author: Andy Fleming
 *  Maintainer: Kumar Gala
 *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 *  This software may be used and distributed according to
 *  the terms of the GNU Public License, Version 2, incorporated herein
 *  by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"

extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
                              int rx_work_limit);

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
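/* These limits follow from the field widths implied by mk_ic_value():
 * the timer threshold (ICTT, read back via get_ictt_value()) appears to
 * be 16 bits wide and the frame-count threshold (ICFT, via
 * get_icft_value()) 8 bits.  This is inferred from the 0xffff/0xff
 * bounds above, not taken from the hardware manual.
 */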
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                            u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
                            struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
                           struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
                          struct ethtool_drvinfo *drvinfo);

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
        "rx-dropped-by-kernel",
        "rx-large-frame-errors",
        "rx-short-frame-errors",
        "rx-non-octet-errors",
        "rx-crc-errors",
        "rx-overrun-errors",
        "rx-busy-errors",
        "rx-babbling-errors",
        "rx-truncated-frames",
        "ethernet-bus-error",
        "tx-babbling-errors",
        "tx-underrun-errors",
        "rx-skb-missing-errors",
        "tx-timeout-errors",
        "tx-rx-64-frames",
        "tx-rx-65-127-frames",
        "tx-rx-128-255-frames",
        "tx-rx-256-511-frames",
        "tx-rx-512-1023-frames",
        "tx-rx-1024-1518-frames",
        "tx-rx-1519-1522-good-vlan",
        "rx-bytes",
        "rx-packets",
        "rx-fcs-errors",
        "receive-multicast-packet",
        "receive-broadcast-packet",
        "rx-control-frame-packets",
        "rx-pause-frame-packets",
        "rx-unknown-op-code",
        "rx-alignment-error",
        "rx-frame-length-error",
        "rx-code-error",
        "rx-carrier-sense-error",
        "rx-undersize-packets",
        "rx-oversize-packets",
        "rx-fragmented-frames",
        "rx-jabber-frames",
        "rx-dropped-frames",
        "tx-byte-counter",
        "tx-packets",
        "tx-multicast-packets",
        "tx-broadcast-packets",
        "tx-pause-control-frames",
        "tx-deferral-packets",
        "tx-excessive-deferral-packets",
        "tx-single-collision-packets",
        "tx-multiple-collision-packets",
        "tx-late-collision-packets",
        "tx-excessive-collision-packets",
        "tx-total-collision",
        "reserved",
        "tx-dropped-frames",
        "tx-jabber-frames",
        "tx-fcs-errors",
        "tx-control-frames",
        "tx-oversize-frames",
        "tx-undersize-frames",
        "tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
                memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
        else
                memcpy(buf, stat_gstrings,
                       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                            u64 *buf)
{
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

        for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
                buf[i] = atomic64_read(&extra[i]);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

                for (; i < GFAR_STATS_LEN; i++, rmon++)
                        buf[i] = (u64) gfar_read(rmon);
        }
}
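
/* These counters back the standard stats query; e.g. (illustrative):
 *      ethtool -S eth0
 * dumps every string from stat_gstrings with its 64-bit value.
 */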

static int gfar_sset_count(struct net_device *dev, int sset)
{
        struct gfar_private *priv = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
                        return GFAR_STATS_LEN;
                else
                        return GFAR_EXTRA_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
                          struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, gfar_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
}

static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

        if (NULL == phydev)
                return -ENODEV;

        return phy_ethtool_sset(phydev, cmd);
}

/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct gfar_priv_tx_q *tx_queue = NULL;

        if (NULL == phydev)
                return -ENODEV;
        tx_queue = priv->tx_queue[0];
        rx_queue = priv->rx_queue[0];

        /* etsec-1.7 and older versions have only one set of txic
         * and rxic registers, even though they support multiple queues */
        cmd->maxtxpkt = get_icft_value(tx_queue->txic);
        cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

        return phy_ethtool_gset(phydev, cmd);
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
        return sizeof(struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                          void *regbuf)
{
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
        u32 *buf = (u32 *) regbuf;

        for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
                buf[i] = gfar_read(&theregs[i]);
}
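
/* The raw dump produced here reaches userspace via the ETHTOOL_GREGS
 * ioctl, e.g. (illustrative):
 *      ethtool -d eth0 raw on > gfar-regs.bin
 */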

/* Convert microseconds to ethernet clock ticks, whose rate
 * depends on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
                                     unsigned int usecs)
{
        unsigned int count;

        /* The timer is different, depending on the interface speed */
        switch (priv->phydev->speed) {
        case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
        case SPEED_100:
                count = GFAR_100_TIME;
                break;
        case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
        }

        /* Make sure we return a number greater than 0
         * if usecs > 0 */
        return (usecs * 1000 + count - 1) / count;
}
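
/* Worked example (assuming GFAR_GBIT_TIME is 512, i.e. one coalescing
 * tick lasts 512 ns at gigabit speed, as defined in gianfar.h):
 *      usecs = 30  ->  (30 * 1000 + 511) / 512 = 59 ticks
 * The "+ count - 1" rounds up, so any non-zero usecs yields >= 1 tick.
 */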

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
                                     unsigned int ticks)
{
        unsigned int count;

        /* The timer is different, depending on the interface speed */
        switch (priv->phydev->speed) {
        case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
        case SPEED_100:
                count = GFAR_100_TIME;
                break;
        case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
        }

        /* Make sure we return a number greater than 0
         * if ticks > 0 */
        return (ticks * count) / 1000;
}

/* Get the coalescing parameters, and put them in the cvals
 * structure.  */
static int gfar_gcoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct gfar_priv_tx_q *tx_queue = NULL;
        unsigned long rxtime;
        unsigned long rxcount;
        unsigned long txtime;
        unsigned long txcount;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;

        if (NULL == priv->phydev)
                return -ENODEV;

        rx_queue = priv->rx_queue[0];
        tx_queue = priv->tx_queue[0];

        rxtime  = get_ictt_value(rx_queue->rxic);
        rxcount = get_icft_value(rx_queue->rxic);
        txtime  = get_ictt_value(tx_queue->txic);
        txcount = get_icft_value(tx_queue->txic);
        cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
        cvals->rx_max_coalesced_frames = rxcount;

        cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
        cvals->tx_max_coalesced_frames = txcount;

        cvals->use_adaptive_rx_coalesce = 0;
        cvals->use_adaptive_tx_coalesce = 0;

        cvals->pkt_rate_low = 0;
        cvals->rx_coalesce_usecs_low = 0;
        cvals->rx_max_coalesced_frames_low = 0;
        cvals->tx_coalesce_usecs_low = 0;
        cvals->tx_max_coalesced_frames_low = 0;

        /* When the packet rate is below pkt_rate_high but above
         * pkt_rate_low (both measured in packets per second) the
         * normal {rx,tx}_* coalescing parameters are used.
         */

        /* When the packet rate (measured in packets per second)
         * is above pkt_rate_high, the {rx,tx}_*_high parameters are
         * used.
         */
        cvals->pkt_rate_high = 0;
        cvals->rx_coalesce_usecs_high = 0;
        cvals->rx_max_coalesced_frames_high = 0;
        cvals->tx_coalesce_usecs_high = 0;
        cvals->tx_max_coalesced_frames_high = 0;

        /* How often to do adaptive coalescing packet rate sampling,
         * measured in seconds.  Must not be zero.
         */
        cvals->rate_sample_interval = 0;

        return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        int i = 0;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;

        /* Set up rx coalescing */
        /* As of now, we enable/disable coalescing for all queues
         * together in case of eTSEC2; this will be modified along
         * with the ethtool interface
         */
        if ((cvals->rx_coalesce_usecs == 0) ||
            (cvals->rx_max_coalesced_frames == 0)) {
                for (i = 0; i < priv->num_rx_queues; i++)
                        priv->rx_queue[i]->rxcoalescing = 0;
        } else {
                for (i = 0; i < priv->num_rx_queues; i++)
                        priv->rx_queue[i]->rxcoalescing = 1;
        }

        if (NULL == priv->phydev)
                return -ENODEV;

        /* Check the bounds of the values */
        if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
                pr_info("Coalescing is limited to %d microseconds\n",
                        GFAR_MAX_COAL_USECS);
                return -EINVAL;
        }

        if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
                pr_info("Coalescing is limited to %d frames\n",
                        GFAR_MAX_COAL_FRAMES);
                return -EINVAL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rxic = mk_ic_value(
                        cvals->rx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
        }

        /* Set up tx coalescing */
        if ((cvals->tx_coalesce_usecs == 0) ||
            (cvals->tx_max_coalesced_frames == 0)) {
                for (i = 0; i < priv->num_tx_queues; i++)
                        priv->tx_queue[i]->txcoalescing = 0;
        } else {
                for (i = 0; i < priv->num_tx_queues; i++)
                        priv->tx_queue[i]->txcoalescing = 1;
        }

        /* Check the bounds of the values */
        if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
                pr_info("Coalescing is limited to %d microseconds\n",
                        GFAR_MAX_COAL_USECS);
                return -EINVAL;
        }

        if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
                pr_info("Coalescing is limited to %d frames\n",
                        GFAR_MAX_COAL_FRAMES);
                return -EINVAL;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->txic = mk_ic_value(
                        cvals->tx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
        }

        gfar_configure_coalescing(priv, 0xFF, 0xFF);

        return 0;
}
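
/* Userspace view of the two handlers above (illustrative):
 *      ethtool -c eth0                         # read via gfar_gcoalesce
 *      ethtool -C eth0 rx-usecs 30 rx-frames 16  # set via gfar_scoalesce
 * Both rx-usecs and rx-frames must be non-zero for coalescing to stay
 * enabled, per the comment on gfar_scoalesce.
 */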

/* Fills in rvals with the current ring parameters.  Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
                            struct ethtool_ringparam *rvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        tx_queue = priv->tx_queue[0];
        rx_queue = priv->rx_queue[0];

        rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

        /* Values changeable by the user.  The valid values are
         * in the range 1 to the "*_max_pending" counterpart above.
         */
        rvals->rx_pending = rx_queue->rx_ring_size;
        rvals->rx_mini_pending = rx_queue->rx_ring_size;
        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
        rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion.  We wait for the ring to be clean before reallocating
 * the rings.
 */
static int gfar_sringparam(struct net_device *dev,
                           struct ethtool_ringparam *rvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        int err = 0, i = 0;

        if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
                return -EINVAL;

        if (!is_power_of_2(rvals->rx_pending)) {
                netdev_err(dev, "Ring sizes must be a power of 2\n");
                return -EINVAL;
        }

        if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
                return -EINVAL;

        if (!is_power_of_2(rvals->tx_pending)) {
                netdev_err(dev, "Ring sizes must be a power of 2\n");
                return -EINVAL;
        }

        if (dev->flags & IFF_UP) {
                unsigned long flags;

                /* Halt TX and RX, and process the frames which
                 * have already been received
                 */
                local_irq_save(flags);
                lock_tx_qs(priv);
                lock_rx_qs(priv);

                gfar_halt(dev);

                unlock_rx_qs(priv);
                unlock_tx_qs(priv);
                local_irq_restore(flags);

                for (i = 0; i < priv->num_rx_queues; i++)
                        gfar_clean_rx_ring(priv->rx_queue[i],
                                           priv->rx_queue[i]->rx_ring_size);

                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);
        }

        /* Change the size */
        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
                priv->tx_queue[i]->num_txbdfree =
                        priv->tx_queue[i]->tx_ring_size;
        }

        /* Rebuild the rings with the new size */
        if (dev->flags & IFF_UP) {
                err = startup_gfar(dev);
                netif_tx_wake_all_queues(dev);
        }
        return err;
}
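
/* Typical userspace sequence (illustrative; sizes must be powers of 2
 * and no larger than GFAR_{RX,TX}_MAX_RING_SIZE):
 *      ethtool -g eth0                 # read via gfar_gringparam
 *      ethtool -G eth0 rx 512 tx 512   # set via gfar_sringparam
 */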

int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        int err = 0, i = 0;
        netdev_features_t changed = dev->features ^ features;

        if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
                gfar_vlan_mode(dev, features);

        if (!(changed & NETIF_F_RXCSUM))
                return 0;

        if (dev->flags & IFF_UP) {
                /* Halt TX and RX, and process the frames which
                 * have already been received
                 */
                local_irq_save(flags);
                lock_tx_qs(priv);
                lock_rx_qs(priv);

                gfar_halt(dev);

                unlock_tx_qs(priv);
                unlock_rx_qs(priv);
                local_irq_restore(flags);

                for (i = 0; i < priv->num_rx_queues; i++)
                        gfar_clean_rx_ring(priv->rx_queue[i],
                                           priv->rx_queue[i]->rx_ring_size);

                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);

                dev->features = features;

                err = startup_gfar(dev);
                netif_tx_wake_all_queues(dev);
        }
        return err;
}
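
/* NETIF_F_RXCSUM changes arrive here through the core feature ops; e.g.
 * (illustrative):
 *      ethtool -K eth0 rx off
 * takes the restart path above so the new setting reaches the hardware.
 */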

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
        struct gfar_private *priv = netdev_priv(dev);

        priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
        } else {
                wol->supported = wol->wolopts = 0;
        }
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
            wol->wolopts != 0)
                return -EINVAL;

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;

        device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

        spin_lock_irqsave(&priv->bflock, flags);
        priv->wol_en = !!device_may_wakeup(&dev->dev);
        spin_unlock_irqrestore(&priv->bflock, flags);

        return 0;
}
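
/* Only magic-packet wake-up is supported; e.g. (illustrative):
 *      ethtool -s eth0 wol g
 * enables it, and "wol d" disables it again.
 */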
#endif

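/* Each RXH_* flag handled below programs one hash-generating filer entry
 * (two for the L2 destination address, whose 48-bit MAC spans the DAH and
 * DAL properties).  Entries are written at priv->cur_filer_idx, walking
 * downwards through the table.
 */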
static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
        u32 fcr = 0x0, fpr = FPR_FILER_MASK;

        if (ethflow & RXH_L2DA) {
                fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;

                fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_VLAN) {
                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_IP_SRC) {
                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_IP_DST) {
                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L3_PROTO) {
                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L4_B_0_1) {
                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L4_B_2_3) {
                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
}

static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
                                       u64 class)
{
        unsigned int last_rule_idx = priv->cur_filer_idx;
        unsigned int cmp_rqfpr;
        unsigned int *local_rqfpr;
        unsigned int *local_rqfcr;
        int i = 0x0, k = 0x0;
        int j = MAX_FILER_IDX, l = 0x0;
        int ret = 1;

        local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
                                    GFP_KERNEL);
        local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
                                    GFP_KERNEL);
        if (!local_rqfpr || !local_rqfcr) {
                ret = 0;
                goto err;
        }

        switch (class) {
        case TCP_V4_FLOW:
                cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
                break;
        case UDP_V4_FLOW:
                cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
                break;
        case TCP_V6_FLOW:
                cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
                break;
        case UDP_V6_FLOW:
                cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
                break;
        default:
                pr_err("Right now this class is not supported\n");
                ret = 0;
                goto err;
        }

        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
                local_rqfpr[j] = priv->ftp_rqfpr[i];
                local_rqfcr[j] = priv->ftp_rqfcr[i];
                j--;
                if ((priv->ftp_rqfcr[i] ==
                     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
                    (priv->ftp_rqfpr[i] == cmp_rqfpr))
                        break;
        }

        if (i == MAX_FILER_IDX + 1) {
                pr_err("No parse rule found, can't create hash rules\n");
                ret = 0;
                goto err;
        }

        /* A match begins the start of a cluster rule; if it was
         * already programmed, we need to overwrite these rules
         */
        for (l = i+1; l < MAX_FILER_IDX; l++) {
                if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
                    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
                        priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
                                             RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
                        priv->ftp_rqfpr[l] = FPR_FILER_MASK;
                        gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
                                         priv->ftp_rqfpr[l]);
                        break;
                }

                if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
                    (priv->ftp_rqfcr[l] & RQFCR_AND))
                        continue;

                local_rqfpr[j] = priv->ftp_rqfpr[l];
                local_rqfcr[j] = priv->ftp_rqfcr[l];
                j--;
        }

        priv->cur_filer_idx = l - 1;
        last_rule_idx = l;

        /* hash rules */
        ethflow_to_filer_rules(priv, ethflow);

        /* Write back the popped out rules again */
        for (k = j+1; k < MAX_FILER_IDX; k++) {
                priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
                priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
                gfar_write_filer(priv, priv->cur_filer_idx,
                                 local_rqfcr[k], local_rqfpr[k]);
                if (!priv->cur_filer_idx)
                        break;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

err:
        kfree(local_rqfcr);
        kfree(local_rqfpr);
        return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
                              struct ethtool_rxnfc *cmd)
{
        /* write the filer rules here */
        if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
                return -EINVAL;

        return 0;
}
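
/* This is the ETHTOOL_SRXFH path; userspace selects which header fields
 * feed the RSS hash, e.g. (illustrative):
 *      ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * where s/d select src/dst IP and f/n the first/last two bytes of the
 * L4 header (the ports, for TCP).
 */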

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
        struct gfar __iomem *regs = NULL;
        u32 i;

        regs = priv->gfargrp[0].regs;

        /* Check if we are in FIFO mode */
        i = gfar_read(&regs->ecntrl);
        i &= ECNTRL_FIFM;
        if (i == ECNTRL_FIFM) {
                netdev_notice(priv->ndev, "Interface in FIFO mode\n");
                i = gfar_read(&regs->rctrl);
                i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
                if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
                        netdev_info(priv->ndev,
                                    "Receive Queue Filtering enabled\n");
                } else {
                        netdev_warn(priv->ndev,
                                    "Receive Queue Filtering disabled\n");
                        return -EOPNOTSUPP;
                }
        } else {
                /* Or in standard mode */
                i = gfar_read(&regs->rctrl);
                i &= RCTRL_PRSDEP_MASK;
                if (i == RCTRL_PRSDEP_MASK) {
                        netdev_info(priv->ndev,
                                    "Receive Queue Filtering enabled\n");
                } else {
                        netdev_warn(priv->ndev,
                                    "Receive Queue Filtering disabled\n");
                        return -EOPNOTSUPP;
                }
        }

        /* Set the properties of the arbitrary filer rule
         * to match the first 4 Layer-4 bytes
         */
        gfar_write(&regs->rbifx, 0xC0C1C2C3);
        return 0;
}

static int gfar_comp_asc(const void *a, const void *b)
{
        return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
        return -memcmp(a, b, 4);
}

static void gfar_swap(void *a, void *b, int size)
{
        u32 *_a = a;
        u32 *_b = b;

        swap(_a[0], _b[0]);
        swap(_a[1], _b[1]);
        swap(_a[2], _b[2]);
        swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
        tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        tab->fe[tab->index].prop = mask;
        tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
                                   RQFCR_AND;
        tab->fe[tab->index].prop = value;
        tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
                                       struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
        tab->fe[tab->index].prop = value;
        tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: 0xFF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple;
 * for a don't-care mask it gives us a 0.
 *
 * The don't-care check and the mask adjustment for mask=0 are done for
 * VLAN and MAC stuff on an upper level (due to missing information on
 * this level).  For those we can discard entries with value=0 and mask=0.
 *
 * Furthermore, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
                               struct filer_table *tab)
{
        switch (flag) {
                /* 3bit */
        case RQFCR_PID_PRI:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_PRI_MASK;
                break;
                /* 8bit */
        case RQFCR_PID_L4P:
        case RQFCR_PID_TOS:
                if (!~(mask | RQFCR_PID_L4P_MASK))
                        return;
                if (!mask)
                        mask = ~0;
                else
                        mask |= RQFCR_PID_L4P_MASK;
                break;
                /* 12bit */
        case RQFCR_PID_VID:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_VID_MASK;
                break;
                /* 16bit */
        case RQFCR_PID_DPT:
        case RQFCR_PID_SPT:
        case RQFCR_PID_ETY:
                if (!~(mask | RQFCR_PID_PORT_MASK))
                        return;
                if (!mask)
                        mask = ~0;
                else
                        mask |= RQFCR_PID_PORT_MASK;
                break;
                /* 24bit */
        case RQFCR_PID_DAH:
        case RQFCR_PID_DAL:
        case RQFCR_PID_SAH:
        case RQFCR_PID_SAL:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_MAC_MASK;
                break;
                /* for all real 32bit masks */
        default:
                if (!~mask)
                        return;
                if (!mask)
                        mask = ~0;
                break;
        }
        gfar_set_general_attribute(value, mask, flag, tab);
}
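
/* Worked example of the one-padding above, assuming RQFCR_PID_PORT_MASK
 * is 0xFFFF0000 (ports are 16-bit properties): a user mask of 0x00FF on
 * a destination port becomes 0xFFFF00FF, i.e. the unused upper property
 * bits are forced into the compare so the hardware never matches on
 * leftover bits outside the field.
 */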

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
                              struct ethtool_tcpip4_spec *mask,
                              struct filer_table *tab)
{
        gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
        gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
        gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
                             struct ethtool_usrip4_spec *mask,
                             struct filer_table *tab)
{
        gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
        gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
        gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
        gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
                           tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
                           struct filer_table *tab)
{
        u32 upper_temp_mask = 0;
        u32 lower_temp_mask = 0;

        /* Source address */
        if (!is_broadcast_ether_addr(mask->h_source)) {
                if (is_zero_ether_addr(mask->h_source)) {
                        upper_temp_mask = 0xFFFFFFFF;
                        lower_temp_mask = 0xFFFFFFFF;
                } else {
                        upper_temp_mask = mask->h_source[0] << 16 |
                                          mask->h_source[1] << 8  |
                                          mask->h_source[2];
                        lower_temp_mask = mask->h_source[3] << 16 |
                                          mask->h_source[4] << 8  |
                                          mask->h_source[5];
                }
                /* Upper 24bit */
                gfar_set_attribute(value->h_source[0] << 16 |
                                   value->h_source[1] << 8  |
                                   value->h_source[2],
                                   upper_temp_mask, RQFCR_PID_SAH, tab);
                /* And the same for the lower part */
                gfar_set_attribute(value->h_source[3] << 16 |
                                   value->h_source[4] << 8  |
                                   value->h_source[5],
                                   lower_temp_mask, RQFCR_PID_SAL, tab);
        }
        /* Destination address */
        if (!is_broadcast_ether_addr(mask->h_dest)) {
                /* Special case: destination is the limited-broadcast
                 * address
                 */
                if (is_broadcast_ether_addr(value->h_dest) &&
                    is_zero_ether_addr(mask->h_dest)) {
                        gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
                } else {
                        if (is_zero_ether_addr(mask->h_dest)) {
                                upper_temp_mask = 0xFFFFFFFF;
                                lower_temp_mask = 0xFFFFFFFF;
                        } else {
                                upper_temp_mask = mask->h_dest[0] << 16 |
                                                  mask->h_dest[1] << 8  |
                                                  mask->h_dest[2];
                                lower_temp_mask = mask->h_dest[3] << 16 |
                                                  mask->h_dest[4] << 8  |
                                                  mask->h_dest[5];
                        }

                        /* Upper 24bit */
                        gfar_set_attribute(value->h_dest[0] << 16 |
                                           value->h_dest[1] << 8  |
                                           value->h_dest[2],
                                           upper_temp_mask, RQFCR_PID_DAH, tab);
                        /* And the same for the lower part */
                        gfar_set_attribute(value->h_dest[3] << 16 |
                                           value->h_dest[4] << 8  |
                                           value->h_dest[5],
                                           lower_temp_mask, RQFCR_PID_DAL, tab);
                }
        }

        gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
                                 struct filer_table *tab)
{
        u32 vlan = 0, vlan_mask = 0;
        u32 id = 0, id_mask = 0;
        u32 cfi = 0, cfi_mask = 0;
        u32 prio = 0, prio_mask = 0;
        u32 old_index = tab->index;

        /* Check if vlan is wanted */
        if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
                if (!rule->m_ext.vlan_tci)
                        rule->m_ext.vlan_tci = 0xFFFF;

                vlan = RQFPR_VLN;
                vlan_mask = RQFPR_VLN;

                /* Separate the fields */
                id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
                id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
                cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
                cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
                prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
                       VLAN_PRIO_SHIFT;
                prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
                            VLAN_PRIO_SHIFT;

                if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
                        vlan |= RQFPR_CFI;
                        vlan_mask |= RQFPR_CFI;
                } else if (cfi != VLAN_TAG_PRESENT &&
                           cfi_mask == VLAN_TAG_PRESENT) {
                        vlan_mask |= RQFPR_CFI;
                }
        }

        switch (rule->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
                                    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
                gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
                                  &rule->m_u.tcp_ip4_spec, tab);
                break;
        case UDP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
                                    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
                gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
                                  &rule->m_u.udp_ip4_spec, tab);
                break;
        case SCTP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
                                    tab);
                /* SCTP has no parser bit of its own, so match the IP
                 * protocol number (132) instead
                 */
                gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
                gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
                                  (struct ethtool_tcpip4_spec *)&rule->m_u,
                                  tab);
                break;
        case IP_USER_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
                                    tab);
                gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
                                 (struct ethtool_usrip4_spec *) &rule->m_u,
                                 tab);
                break;
        case ETHER_FLOW:
                if (vlan)
                        gfar_set_parse_bits(vlan, vlan_mask, tab);
                gfar_set_ether((struct ethhdr *) &rule->h_u,
                               (struct ethhdr *) &rule->m_u, tab);
                break;
        default:
                return -1;
        }

        /* Set the VLAN attributes at the end */
        if (vlan) {
                gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
                gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
        }

        /* If nothing has been written till now, it must be a default */
        if (tab->index == old_index) {
                gfar_set_mask(0xFFFFFFFF, tab);
                tab->fe[tab->index].ctrl = 0x20;
                tab->fe[tab->index].prop = 0x0;
                tab->index++;
        }

        /* Remove last AND */
        tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

        /* Specify which queue to use or to drop */
        if (rule->ring_cookie == RX_CLS_FLOW_DISC)
                tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
        else
                tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

        /* Only big enough entries can be clustered */
        if (tab->index > (old_index + 2)) {
                tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
                tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
        }

        /* In rare cases the cache can be full while there is
         * free space in hw
         */
        if (tab->index > MAX_FILER_CACHE_IDX - 1)
                return -EBUSY;

        return 0;
}
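
/* The flow specs consumed here come from ethtool's n-tuple interface,
 * e.g. (illustrative) steer HTTP traffic from one host to queue 1:
 *      ethtool -U eth0 flow-type tcp4 src-ip 10.0.0.1 dst-port 80 action 1
 * and "action -1" drops the flow (RX_CLS_FLOW_DISC) instead.
 */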

/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
                                    struct gfar_filer_entry src[], s32 size)
{
        while (size > 0) {
                size--;
                dst[size].ctrl = src[size].ctrl;
                dst[size].prop = src[size].prop;
        }
}

/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
        int length;

        if (end > MAX_FILER_CACHE_IDX || end < begin)
                return -EINVAL;

        end++;
        length = end - begin;

        /* Copy */
        while (end < tab->index) {
                tab->fe[begin].ctrl = tab->fe[end].ctrl;
                tab->fe[begin++].prop = tab->fe[end++].prop;
        }

        /* Fill up with don't cares */
        while (begin < tab->index) {
                tab->fe[begin].ctrl = 0x60;
                tab->fe[begin].prop = 0xFFFFFFFF;
                begin++;
        }

        tab->index -= length;
        return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
                                     struct filer_table *tab)
{
        if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
            begin > MAX_FILER_CACHE_IDX)
                return -EINVAL;

        gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
                                tab->index - length + 1);

        tab->index += length;
        return 0;
}

static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
        for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
             start++) {
                if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
                    (RQFCR_AND | RQFCR_CLE))
                        return start;
        }
        return -1;
}

static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
        for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
             start++) {
                if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
                    (RQFCR_CLE))
                        return start;
        }
        return -1;
}

/* Uses hardware's clustering option to reduce
 * the number of filer table entries
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
        s32 i = -1, j, iend, jend;

        while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
                j = i;
                while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
                        /* The cluster entries themselves and the previous one
                         * (a mask) must be identical!
                         */
                        if (tab->fe[i].ctrl != tab->fe[j].ctrl)
                                break;
                        if (tab->fe[i].prop != tab->fe[j].prop)
                                break;
                        if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
                                break;
                        if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
                                break;
                        iend = gfar_get_next_cluster_end(i, tab);
                        jend = gfar_get_next_cluster_end(j, tab);
                        if (jend == -1 || iend == -1)
                                break;

                        /* First we make some free space, where our cluster
                         * element should be. Then we copy it there and finally
                         * delete it from its old location.
                         */
                        if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
                            -EINVAL)
                                break;

                        gfar_copy_filer_entries(&(tab->fe[iend + 1]),
                                                &(tab->fe[jend + 1]), jend - j);

                        if (gfar_trim_filer_entries(jend - 1,
                                                    jend + (jend - j),
                                                    tab) == -EINVAL)
                                return;

                        /* Mask out cluster bit */
                        tab->fe[iend].ctrl &= ~(RQFCR_CLE);
                }
        }
}

/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
                           struct gfar_filer_entry *a2,
                           struct gfar_filer_entry *b1,
                           struct gfar_filer_entry *b2, u32 mask)
{
        u32 temp[4];

        temp[0] = a1->ctrl & mask;
        temp[1] = a2->ctrl & mask;
        temp[2] = b1->ctrl & mask;
        temp[3] = b2->ctrl & mask;

        a1->ctrl &= ~mask;
        a2->ctrl &= ~mask;
        b1->ctrl &= ~mask;
        b2->ctrl &= ~mask;

        a1->ctrl |= temp[1];
        a2->ctrl |= temp[0];
        b1->ctrl |= temp[3];
        b2->ctrl |= temp[2];
}
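
/* Tiny example of the swap above: with mask = 0x0F,
 * a1->ctrl = 0x13 and a2->ctrl = 0x25 become 0x15 and 0x23;
 * only the masked low nibbles trade places, the rest stays put.
 */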

/* Generate a list in mask_table consisting of mask values, each with its
 * start and end of validity, and with the block number as an indicator
 * for parts belonging together (glued by ANDs)
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
                                    struct filer_table *tab)
{
        u32 i, and_index = 0, block_index = 1;

        for (i = 0; i < tab->index; i++) {
                /* LSByte of control = 0 sets a mask */
                if (!(tab->fe[i].ctrl & 0xF)) {
                        mask_table[and_index].mask = tab->fe[i].prop;
                        mask_table[and_index].start = i;
                        mask_table[and_index].block = block_index;
                        if (and_index >= 1)
                                mask_table[and_index - 1].end = i - 1;
                        and_index++;
                }
                /* Cluster starts and ends will be separated because they
                 * should hold their position
                 */
                if (tab->fe[i].ctrl & RQFCR_CLE)
                        block_index++;
                /* An unset AND indicates the end of a dependent block */
                if (!(tab->fe[i].ctrl & RQFCR_AND))
                        block_index++;
        }

        mask_table[and_index - 1].end = i - 1;

        return and_index;
}

/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
                                 struct filer_table *temp_table, u32 and_index)
{
        /* Pointer to compare function (_asc or _desc) */
        int (*gfar_comp)(const void *, const void *);

        u32 i, size = 0, start = 0, prev = 1;
        u32 old_first, old_last, new_first, new_last;

        gfar_comp = &gfar_comp_desc;

        for (i = 0; i < and_index; i++) {
                if (prev != mask_table[i].block) {
                        old_first = mask_table[start].start + 1;
                        old_last = mask_table[i - 1].end;
                        sort(mask_table + start, size,
                             sizeof(struct gfar_mask_entry),
                             gfar_comp, &gfar_swap);

                        /* Toggle order for every block. This makes the
                         * thing more efficient!
                         */
                        if (gfar_comp == gfar_comp_desc)
                                gfar_comp = &gfar_comp_asc;
                        else
                                gfar_comp = &gfar_comp_desc;

                        new_first = mask_table[start].start + 1;
                        new_last = mask_table[i - 1].end;

                        gfar_swap_bits(&temp_table->fe[new_first],
                                       &temp_table->fe[old_first],
                                       &temp_table->fe[new_last],
                                       &temp_table->fe[old_last],
                                       RQFCR_QUEUE | RQFCR_CLE |
                                       RQFCR_RJE | RQFCR_AND);

                        start = i;
                        size = 0;
                }
                size++;
                prev = mask_table[i].block;
        }
}
1418
/* Reduces the number of masks needed in the filer table to save entries.
 * This is done by sorting the masks within each dependent block; a
 * dependent block is identified by gluing ANDs or by CLE. The sort order
 * toggles after every block. Entries in the scope of a mask must, of
 * course, move along with it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
				&(temp_table->fe[mask_table[i].start]), size);
		j += size;
	}

	/* Finally, check for duplicated masks and drop the second one of
	 * each pair: a mask entry equal to the previous mask entry is
	 * redundant, because the mask register still holds that value
	 */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:	kfree(temp_table);
	return ret;
}

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;
	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Avoid processing an inconsistent filer table */
	lock_rx_qs(priv);

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
	     i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX - 1; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* The last entry must be a default accept,
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	unlock_rx_qs(priv);

	return 0;
}

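/* Warn about the parts of a flow spec the filer cannot match on.
 * These are warnings only: the function always returns 0 and the rule
 * is still installed, with the unsupported fields ignored.
 */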
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

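/* Rebuild the whole filer table from the driver's rule list and write
 * the result to hardware; called after every rule change.
 */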
static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* kzalloc() zeroes the table, so tab->index starts at zero, too */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing flow_spec data into the filer
	 * table's binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	/* i is zero when the rule list is empty (e.g. after deleting
	 * the last rule); skip the summary then to avoid dividing by zero
	 */
	if (i)
		pr_debug("\n\tSummary:\n"
			 "\tData on hardware: %d\n"
			 "\tCompression rate: %d%%\n",
			 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

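/* ethtool's flow-spec masks use the inverse convention of the filer
 * code here (a set bit marks a bit to ignore), so flip every mask bit.
 * The operation is its own inverse; gfar_get_cls() applies it again to
 * convert back for userspace.
 */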
static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= 0xFFFF;
	flow->m_ext.vlan_tci ^= 0xFFFF;
	flow->m_ext.data[0] ^= ~0;
	flow->m_ext.data[1] ^= ~0;
}

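/* Insert a new classification rule, keeping the rule list sorted by
 * location, and rewrite the hardware table. Illustrative userspace
 * trigger (ETHTOOL_SRXCLSRLINS), assuming the interface is eth0:
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 10
 */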
static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right position, keeping the
	 * list sorted by location
	 */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

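/* Remove the rule stored at location @loc, if any, and rewrite the
 * hardware table.
 */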
static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

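/* Copy the rule at cmd->fs.location back to the caller, converting the
 * masks back to ethtool's convention.
 */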
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

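/* Fill rule_locs[] with the location of every active rule; fails with
 * -EMSGSIZE if the caller's buffer is too small.
 */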
static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

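/* ethtool set_rxnfc entry point: hash options and rule insert/delete */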
static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}

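/* ethtool get_rxnfc entry point: ring count, rule count and rule dump */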
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);

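/* Report timestamping capabilities: software timestamping only when
 * the eTSEC has no 1588 timer block, otherwise hardware TX/RX
 * timestamping through the PHC registered by the gianfar PTP driver.
 */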
static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};