drivers/net/usb/lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35
36 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME     "lan78xx"
39 #define DRIVER_VERSION  "1.0.3"
40
41 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
42 #define THROTTLE_JIFFIES                (HZ / 8)
43 #define UNLINK_TIMEOUT_MS               3
44
45 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
46
47 #define SS_USB_PKT_SIZE                 (1024)
48 #define HS_USB_PKT_SIZE                 (512)
49 #define FS_USB_PKT_SIZE                 (64)
50
51 #define MAX_RX_FIFO_SIZE                (12 * 1024)
52 #define MAX_TX_FIFO_SIZE                (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY           (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE          (9000)
56 #define DEFAULT_TX_CSUM_ENABLE          (true)
57 #define DEFAULT_RX_CSUM_ENABLE          (true)
58 #define DEFAULT_TSO_CSUM_ENABLE         (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
60 #define TX_OVERHEAD                     (8)
61 #define RXW_PADDING                     2
62
63 #define LAN78XX_USB_VENDOR_ID           (0x0424)
64 #define LAN7800_USB_PRODUCT_ID          (0x7800)
65 #define LAN7850_USB_PRODUCT_ID          (0x7850)
66 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
67 #define LAN78XX_OTP_MAGIC               (0x78F3)
68
69 #define MII_READ                        1
70 #define MII_WRITE                       0
71
72 #define EEPROM_INDICATOR                (0xA5)
73 #define EEPROM_MAC_OFFSET               (0x01)
74 #define MAX_EEPROM_SIZE                 512
75 #define OTP_INDICATOR_1                 (0xF3)
76 #define OTP_INDICATOR_2                 (0xF7)
77
78 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
79                                          WAKE_MCAST | WAKE_BCAST | \
80                                          WAKE_ARP | WAKE_MAGIC)
81
82 /* USB related defines */
83 #define BULK_IN_PIPE                    1
84 #define BULK_OUT_PIPE                   2
85
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
88
89 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
90         "RX FCS Errors",
91         "RX Alignment Errors",
92         "RX Fragment Errors",
93         "RX Jabber Errors",
94         "RX Undersize Frame Errors",
95         "RX Oversize Frame Errors",
96         "RX Dropped Frames",
97         "RX Unicast Byte Count",
98         "RX Broadcast Byte Count",
99         "RX Multicast Byte Count",
100         "RX Unicast Frames",
101         "RX Broadcast Frames",
102         "RX Multicast Frames",
103         "RX Pause Frames",
104         "RX 64 Byte Frames",
105         "RX 65 - 127 Byte Frames",
106         "RX 128 - 255 Byte Frames",
107         "RX 256 - 511 Byte Frames",
108         "RX 512 - 1023 Byte Frames",
109         "RX 1024 - 1518 Byte Frames",
110         "RX Greater 1518 Byte Frames",
111         "EEE RX LPI Transitions",
112         "EEE RX LPI Time",
113         "TX FCS Errors",
114         "TX Excess Deferral Errors",
115         "TX Carrier Errors",
116         "TX Bad Byte Count",
117         "TX Single Collisions",
118         "TX Multiple Collisions",
119         "TX Excessive Collision",
120         "TX Late Collisions",
121         "TX Unicast Byte Count",
122         "TX Broadcast Byte Count",
123         "TX Multicast Byte Count",
124         "TX Unicast Frames",
125         "TX Broadcast Frames",
126         "TX Multicast Frames",
127         "TX Pause Frames",
128         "TX 64 Byte Frames",
129         "TX 65 - 127 Byte Frames",
130         "TX 128 - 255 Byte Frames",
131         "TX 256 - 511 Byte Frames",
132         "TX 512 - 1023 Byte Frames",
133         "TX 1024 - 1518 Byte Frames",
134         "TX Greater 1518 Byte Frames",
135         "EEE TX LPI Transitions",
136         "EEE TX LPI Time",
137 };
138
139 struct lan78xx_statstage {
140         u32 rx_fcs_errors;
141         u32 rx_alignment_errors;
142         u32 rx_fragment_errors;
143         u32 rx_jabber_errors;
144         u32 rx_undersize_frame_errors;
145         u32 rx_oversize_frame_errors;
146         u32 rx_dropped_frames;
147         u32 rx_unicast_byte_count;
148         u32 rx_broadcast_byte_count;
149         u32 rx_multicast_byte_count;
150         u32 rx_unicast_frames;
151         u32 rx_broadcast_frames;
152         u32 rx_multicast_frames;
153         u32 rx_pause_frames;
154         u32 rx_64_byte_frames;
155         u32 rx_65_127_byte_frames;
156         u32 rx_128_255_byte_frames;
157         u32 rx_256_511_bytes_frames;
158         u32 rx_512_1023_byte_frames;
159         u32 rx_1024_1518_byte_frames;
160         u32 rx_greater_1518_byte_frames;
161         u32 eee_rx_lpi_transitions;
162         u32 eee_rx_lpi_time;
163         u32 tx_fcs_errors;
164         u32 tx_excess_deferral_errors;
165         u32 tx_carrier_errors;
166         u32 tx_bad_byte_count;
167         u32 tx_single_collisions;
168         u32 tx_multiple_collisions;
169         u32 tx_excessive_collision;
170         u32 tx_late_collisions;
171         u32 tx_unicast_byte_count;
172         u32 tx_broadcast_byte_count;
173         u32 tx_multicast_byte_count;
174         u32 tx_unicast_frames;
175         u32 tx_broadcast_frames;
176         u32 tx_multicast_frames;
177         u32 tx_pause_frames;
178         u32 tx_64_byte_frames;
179         u32 tx_65_127_byte_frames;
180         u32 tx_128_255_byte_frames;
181         u32 tx_256_511_bytes_frames;
182         u32 tx_512_1023_byte_frames;
183         u32 tx_1024_1518_byte_frames;
184         u32 tx_greater_1518_byte_frames;
185         u32 eee_tx_lpi_transitions;
186         u32 eee_tx_lpi_time;
187 };
188
189 struct lan78xx_net;
190
191 struct lan78xx_priv {
192         struct lan78xx_net *dev;
193         u32 rfe_ctl;
194         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
195         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
196         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
197         struct mutex dataport_mutex; /* for dataport access */
198         spinlock_t rfe_ctl_lock; /* for rfe register access */
199         struct work_struct set_multicast;
200         struct work_struct set_vlan;
201         u32 wol;
202 };
203
204 enum skb_state {
205         illegal = 0,
206         tx_start,
207         tx_done,
208         rx_start,
209         rx_done,
210         rx_cleanup,
211         unlink_start
212 };
213
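/* struct skb_data lives in skb->cb, so it must not exceed the 48-byte
 * control buffer of struct sk_buff.
 */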
214 struct skb_data {               /* skb->cb is one of these */
215         struct urb *urb;
216         struct lan78xx_net *dev;
217         enum skb_state state;
218         size_t length;
219 };
220
221 struct usb_context {
222         struct usb_ctrlrequest req;
223         struct lan78xx_net *dev;
224 };
225
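/* Deferred-work event bits: lan78xx_defer_kevent() sets them in dev->flags
 * and they are handled later from the delayed work item, in process context.
 */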
226 #define EVENT_TX_HALT                   0
227 #define EVENT_RX_HALT                   1
228 #define EVENT_RX_MEMORY                 2
229 #define EVENT_STS_SPLIT                 3
230 #define EVENT_LINK_RESET                4
231 #define EVENT_RX_PAUSED                 5
232 #define EVENT_DEV_WAKING                6
233 #define EVENT_DEV_ASLEEP                7
234 #define EVENT_DEV_OPEN                  8
235
236 struct lan78xx_net {
237         struct net_device       *net;
238         struct usb_device       *udev;
239         struct usb_interface    *intf;
240         void                    *driver_priv;
241
242         int                     rx_qlen;
243         int                     tx_qlen;
244         struct sk_buff_head     rxq;
245         struct sk_buff_head     txq;
246         struct sk_buff_head     done;
247         struct sk_buff_head     rxq_pause;
248         struct sk_buff_head     txq_pend;
249
250         struct tasklet_struct   bh;
251         struct delayed_work     wq;
252
253         struct usb_host_endpoint *ep_blkin;
254         struct usb_host_endpoint *ep_blkout;
255         struct usb_host_endpoint *ep_intr;
256
257         int                     msg_enable;
258
259         struct urb              *urb_intr;
260         struct usb_anchor       deferred;
261
262         struct mutex            phy_mutex; /* for phy access */
263         unsigned                pipe_in, pipe_out, pipe_intr;
264
265         u32                     hard_mtu;       /* count any extra framing */
266         size_t                  rx_urb_size;    /* size for rx urbs */
267
268         unsigned long           flags;
269
270         wait_queue_head_t       *wait;
271         unsigned char           suspend_count;
272
273         unsigned                maxpacket;
274         struct timer_list       delay;
275
276         unsigned long           data[5];
277
278         int                     link_on;
279         u8                      mdix_ctrl;
280
281         u32                     chipid;
282         u32                     chiprev;
283         struct mii_bus          *mdiobus;
284
285         int                     fc_autoneg;
286         u8                      fc_request_control;
287 };
288
289 /* use ethtool to change the level for any given device */
290 static int msg_level = -1;
291 module_param(msg_level, int, 0);
292 MODULE_PARM_DESC(msg_level, "Override default message level");
293
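/* Register accesses go over USB vendor-specific control transfers. The
 * 4-byte transfer buffer is kmalloc()ed rather than placed on the stack
 * because USB transfer buffers must be DMA-capable.
 */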
294 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
295 {
296         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
297         int ret;
298
299         if (!buf)
300                 return -ENOMEM;
301
302         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
303                               USB_VENDOR_REQUEST_READ_REGISTER,
304                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
305                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
306         if (likely(ret >= 0)) {
307                 le32_to_cpus(buf);
308                 *data = *buf;
309         } else {
310                 netdev_warn(dev->net,
311                             "Failed to read register index 0x%08x. ret = %d",
312                             index, ret);
313         }
314
315         kfree(buf);
316
317         return ret;
318 }
319
320 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
321 {
322         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
323         int ret;
324
325         if (!buf)
326                 return -ENOMEM;
327
328         *buf = data;
329         cpu_to_le32s(buf);
330
331         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
332                               USB_VENDOR_REQUEST_WRITE_REGISTER,
333                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
334                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
335         if (unlikely(ret < 0)) {
336                 netdev_warn(dev->net,
337                             "Failed to write register index 0x%08x. ret = %d",
338                             index, ret);
339         }
340
341         kfree(buf);
342
343         return ret;
344 }
345
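/* The hardware statistics block is fetched with a single vendor control
 * request and converted from little-endian to host byte order word by word.
 */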
346 static int lan78xx_read_stats(struct lan78xx_net *dev,
347                               struct lan78xx_statstage *data)
348 {
349         int ret = 0;
350         int i;
351         struct lan78xx_statstage *stats;
352         u32 *src;
353         u32 *dst;
354
355         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
356         if (!stats)
357                 return -ENOMEM;
358
359         ret = usb_control_msg(dev->udev,
360                               usb_rcvctrlpipe(dev->udev, 0),
361                               USB_VENDOR_REQUEST_GET_STATS,
362                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
363                               0,
364                               0,
365                               (void *)stats,
366                               sizeof(*stats),
367                               USB_CTRL_SET_TIMEOUT);
368         if (likely(ret >= 0)) {
369                 src = (u32 *)stats;
370                 dst = (u32 *)data;
371                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
372                         le32_to_cpus(&src[i]);
373                         dst[i] = src[i];
374                 }
375         } else {
376                 netdev_warn(dev->net,
377                             "Failed to read stat ret = 0x%x", ret);
378         }
379
380         kfree(stats);
381
382         return ret;
383 }
384
385 /* Poll MII_ACC until the PHY access completes or times out; called with phy_mutex held */
386 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
387 {
388         unsigned long start_time = jiffies;
389         u32 val;
390         int ret;
391
392         do {
393                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
394                 if (unlikely(ret < 0))
395                         return -EIO;
396
397                 if (!(val & MII_ACC_MII_BUSY_))
398                         return 0;
399         } while (!time_after(jiffies, start_time + HZ));
400
401         return -EIO;
402 }
403
404 static inline u32 mii_access(int id, int index, int read)
405 {
406         u32 ret;
407
408         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
409         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
410         if (read)
411                 ret |= MII_ACC_MII_READ_;
412         else
413                 ret |= MII_ACC_MII_WRITE_;
414         ret |= MII_ACC_MII_BUSY_;
415
416         return ret;
417 }
418
419 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
420 {
421         unsigned long start_time = jiffies;
422         u32 val;
423         int ret;
424
425         do {
426                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
427                 if (unlikely(ret < 0))
428                         return -EIO;
429
430                 if (!(val & E2P_CMD_EPC_BUSY_) ||
431                     (val & E2P_CMD_EPC_TIMEOUT_))
432                         break;
433                 usleep_range(40, 100);
434         } while (!time_after(jiffies, start_time + HZ));
435
436         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
437                 netdev_warn(dev->net, "EEPROM operation timed out");
438                 return -EIO;
439         }
440
441         return 0;
442 }
443
444 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
445 {
446         unsigned long start_time = jiffies;
447         u32 val;
448         int ret;
449
450         do {
451                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
452                 if (unlikely(ret < 0))
453                         return -EIO;
454
455                 if (!(val & E2P_CMD_EPC_BUSY_))
456                         return 0;
457
458                 usleep_range(40, 100);
459         } while (!time_after(jiffies, start_time + HZ));
460
461         netdev_warn(dev->net, "EEPROM is busy");
462         return -EIO;
463 }
464
465 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
466                                    u32 length, u8 *data)
467 {
468         u32 val;
469         u32 saved;
470         int i, ret;
471         int retval;
472
473         /* Depending on the chip, some EEPROM pins are muxed with the LED
474          * function; disable the LEDs around EEPROM access, then restore them.
475          */
476         ret = lan78xx_read_reg(dev, HW_CFG, &val);
477         saved = val;
478         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
479                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
480                 ret = lan78xx_write_reg(dev, HW_CFG, val);
481         }
482
483         retval = lan78xx_eeprom_confirm_not_busy(dev);
484         if (retval)
485                 return retval;
486
487         for (i = 0; i < length; i++) {
488                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
489                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
490                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
491                 if (unlikely(ret < 0)) {
492                         retval = -EIO;
493                         goto exit;
494                 }
495
496                 retval = lan78xx_wait_eeprom(dev);
497                 if (retval < 0)
498                         goto exit;
499
500                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
501                 if (unlikely(ret < 0)) {
502                         retval = -EIO;
503                         goto exit;
504                 }
505
506                 data[i] = val & 0xFF;
507                 offset++;
508         }
509
510         retval = 0;
511 exit:
512         if (dev->chipid == ID_REV_CHIP_ID_7800_)
513                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
514
515         return retval;
516 }
517
518 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
519                                u32 length, u8 *data)
520 {
521         u8 sig;
522         int ret;
523
524         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
525         if ((ret == 0) && (sig == EEPROM_INDICATOR))
526                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
527         else
528                 ret = -EINVAL;
529
530         return ret;
531 }
532
533 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
534                                     u32 length, u8 *data)
535 {
536         u32 val;
537         u32 saved;
538         int i, ret;
539         int retval;
540
541         /* Depending on the chip, some EEPROM pins are muxed with the LED
542          * function; disable the LEDs around EEPROM access, then restore them.
543          */
544         ret = lan78xx_read_reg(dev, HW_CFG, &val);
545         saved = val;
546         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
547                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
548                 ret = lan78xx_write_reg(dev, HW_CFG, val);
549         }
550
551         retval = lan78xx_eeprom_confirm_not_busy(dev);
552         if (retval)
553                 goto exit;
554
555         /* Issue write/erase enable command */
556         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
557         ret = lan78xx_write_reg(dev, E2P_CMD, val);
558         if (unlikely(ret < 0)) {
559                 retval = -EIO;
560                 goto exit;
561         }
562
563         retval = lan78xx_wait_eeprom(dev);
564         if (retval < 0)
565                 goto exit;
566
567         for (i = 0; i < length; i++) {
568                 /* Fill data register */
569                 val = data[i];
570                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
571                 if (ret < 0) {
572                         retval = -EIO;
573                         goto exit;
574                 }
575
576                 /* Send "write" command */
577                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
578                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
579                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
580                 if (ret < 0) {
581                         retval = -EIO;
582                         goto exit;
583                 }
584
585                 retval = lan78xx_wait_eeprom(dev);
586                 if (retval < 0)
587                         goto exit;
588
589                 offset++;
590         }
591
592         retval = 0;
593 exit:
594         if (dev->chipid == ID_REV_CHIP_ID_7800_)
595                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
596
597         return retval;
598 }
599
600 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
601                                 u32 length, u8 *data)
602 {
603         int i;
604         int ret;
605         u32 buf;
606         unsigned long timeout;
607
608         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
609
610         if (buf & OTP_PWR_DN_PWRDN_N_) {
611                 /* clear it and wait to be cleared */
612                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
613
614                 timeout = jiffies + HZ;
615                 do {
616                         usleep_range(1, 10);
617                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
618                         if (time_after(jiffies, timeout)) {
619                                 netdev_warn(dev->net,
620                                             "timeout on OTP_PWR_DN");
621                                 return -EIO;
622                         }
623                 } while (buf & OTP_PWR_DN_PWRDN_N_);
624         }
625
626         for (i = 0; i < length; i++) {
627                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
628                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
629                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
630                                         ((offset + i) & OTP_ADDR2_10_3));
631
632                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
633                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
634
635                 timeout = jiffies + HZ;
636                 do {
637                         udelay(1);
638                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
639                         if (time_after(jiffies, timeout)) {
640                                 netdev_warn(dev->net,
641                                             "timeout on OTP_STATUS");
642                                 return -EIO;
643                         }
644                 } while (buf & OTP_STATUS_BUSY_);
645
646                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
647
648                 data[i] = (u8)(buf & 0xFF);
649         }
650
651         return 0;
652 }
653
654 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
655                                  u32 length, u8 *data)
656 {
657         int i;
658         int ret;
659         u32 buf;
660         unsigned long timeout;
661
662         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
663
664         if (buf & OTP_PWR_DN_PWRDN_N_) {
665                 /* clear it and wait to be cleared */
666                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
667
668                 timeout = jiffies + HZ;
669                 do {
670                         udelay(1);
671                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
672                         if (time_after(jiffies, timeout)) {
673                                 netdev_warn(dev->net,
674                                             "timeout on OTP_PWR_DN completion");
675                                 return -EIO;
676                         }
677                 } while (buf & OTP_PWR_DN_PWRDN_N_);
678         }
679
680         /* set to BYTE program mode */
681         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
682
683         for (i = 0; i < length; i++) {
684                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
685                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
686                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
687                                         ((offset + i) & OTP_ADDR2_10_3));
688                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
689                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
690                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
691
692                 timeout = jiffies + HZ;
693                 do {
694                         udelay(1);
695                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
696                         if (time_after(jiffies, timeout)) {
697                                 netdev_warn(dev->net,
698                                             "Timeout on OTP_STATUS completion");
699                                 return -EIO;
700                         }
701                 } while (buf & OTP_STATUS_BUSY_);
702         }
703
704         return 0;
705 }
706
707 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
708                             u32 length, u8 *data)
709 {
710         u8 sig;
711         int ret;
712
713         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
714
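        /* The first OTP byte identifies the programmed image:
         * OTP_INDICATOR_1 means the data starts at offset 0,
         * OTP_INDICATOR_2 means it starts at offset 0x100;
         * anything else is rejected with -EINVAL.
         */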
715         if (ret == 0) {
716                 if (sig == OTP_INDICATOR_2)
717                         offset += 0x100;
718                 else if (sig != OTP_INDICATOR_1)
719                         ret = -EINVAL;
720
721                 if (!ret)
722                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
723         }
724
725         return ret;
726 }
727
728 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
729 {
730         int i, ret;
731
732         for (i = 0; i < 100; i++) {
733                 u32 dp_sel;
734
735                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
736                 if (unlikely(ret < 0))
737                         return -EIO;
738
739                 if (dp_sel & DP_SEL_DPRDY_)
740                         return 0;
741
742                 usleep_range(40, 100);
743         }
744
745         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
746
747         return -EIO;
748 }
749
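/* The "dataport" gives indirect access to internal RAM blocks (e.g. the
 * VLAN and multicast hash tables) via the DP_SEL, DP_ADDR, DP_DATA and
 * DP_CMD registers.
 */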
750 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
751                                   u32 addr, u32 length, u32 *buf)
752 {
753         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
754         u32 dp_sel;
755         int i, ret;
756
757         if (usb_autopm_get_interface(dev->intf) < 0)
758                 return 0;
759
760         mutex_lock(&pdata->dataport_mutex);
761
762         ret = lan78xx_dataport_wait_not_busy(dev);
763         if (ret < 0)
764                 goto done;
765
766         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
767
768         dp_sel &= ~DP_SEL_RSEL_MASK_;
769         dp_sel |= ram_select;
770         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
771
772         for (i = 0; i < length; i++) {
773                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
774
775                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
776
777                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
778
779                 ret = lan78xx_dataport_wait_not_busy(dev);
780                 if (ret < 0)
781                         goto done;
782         }
783
784 done:
785         mutex_unlock(&pdata->dataport_mutex);
786         usb_autopm_put_interface(dev->intf);
787
788         return ret;
789 }
790
791 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
792                                     int index, u8 addr[ETH_ALEN])
793 {
794         u32     temp;
795
796         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
797                 temp = addr[3];
798                 temp = addr[2] | (temp << 8);
799                 temp = addr[1] | (temp << 8);
800                 temp = addr[0] | (temp << 8);
801                 pdata->pfilter_table[index][1] = temp;
802                 temp = addr[5];
803                 temp = addr[4] | (temp << 8);
804                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
805                 pdata->pfilter_table[index][0] = temp;
806         }
807 }
808
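/* The top 9 bits of the Ethernet CRC select one of 512 hash bits; the
 * table is kept as 32-bit words in pdata->mchash_table.
 */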
809 /* returns hash bit number for given MAC address */
810 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
811 {
812         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
813 }
814
815 static void lan78xx_deferred_multicast_write(struct work_struct *param)
816 {
817         struct lan78xx_priv *pdata =
818                         container_of(param, struct lan78xx_priv, set_multicast);
819         struct lan78xx_net *dev = pdata->dev;
820         int i;
821         int ret;
822
823         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
824                   pdata->rfe_ctl);
825
826         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
827                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
828
829         for (i = 1; i < NUM_OF_MAF; i++) {
830                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
831                 ret = lan78xx_write_reg(dev, MAF_LO(i),
832                                         pdata->pfilter_table[i][1]);
833                 ret = lan78xx_write_reg(dev, MAF_HI(i),
834                                         pdata->pfilter_table[i][0]);
835         }
836
837         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
838 }
839
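/* ndo_set_rx_mode callback: it runs in atomic context, so the filter
 * tables are rebuilt here under rfe_ctl_lock and the (sleeping) USB
 * register writes are deferred to the set_multicast work item.
 */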
840 static void lan78xx_set_multicast(struct net_device *netdev)
841 {
842         struct lan78xx_net *dev = netdev_priv(netdev);
843         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
844         unsigned long flags;
845         int i;
846
847         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
848
849         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
850                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
851
852         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
853                 pdata->mchash_table[i] = 0;
854         /* pfilter_table[0] holds the device's own MAC address */
855         for (i = 1; i < NUM_OF_MAF; i++) {
856                 pdata->pfilter_table[i][0] =
857                         pdata->pfilter_table[i][1] = 0;
858         }
859
860         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
861
862         if (dev->net->flags & IFF_PROMISC) {
863                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
864                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
865         } else {
866                 if (dev->net->flags & IFF_ALLMULTI) {
867                         netif_dbg(dev, drv, dev->net,
868                                   "receive all multicast enabled");
869                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
870                 }
871         }
872
873         if (netdev_mc_count(dev->net)) {
874                 struct netdev_hw_addr *ha;
875                 int i;
876
877                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
878
879                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
880
881                 i = 1;
882                 netdev_for_each_mc_addr(ha, netdev) {
883                         /* set first 32 into Perfect Filter */
884                         if (i < 33) {
885                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
886                         } else {
887                                 u32 bitnum = lan78xx_hash(ha->addr);
888
889                                 pdata->mchash_table[bitnum / 32] |=
890                                                         (1 << (bitnum % 32));
891                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
892                         }
893                         i++;
894                 }
895         }
896
897         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
898
899         /* defer register writes to a sleepable context */
900         schedule_work(&pdata->set_multicast);
901 }
902
903 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
904                                       u16 lcladv, u16 rmtadv)
905 {
906         u32 flow = 0, fct_flow = 0;
907         int ret;
908         u8 cap;
909
910         if (dev->fc_autoneg)
911                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
912         else
913                 cap = dev->fc_request_control;
914
915         if (cap & FLOW_CTRL_TX)
916                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
917
918         if (cap & FLOW_CTRL_RX)
919                 flow |= FLOW_CR_RX_FCEN_;
920
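        /* The 0xFFFF ORed in above is presumably the pause-time field of
         * the FLOW register (set to maximum). FCT_FLOW programs the FIFO
         * flow-control thresholds; the constants below look tuned per USB
         * link speed.
         */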
921         if (dev->udev->speed == USB_SPEED_SUPER)
922                 fct_flow = 0x817;
923         else if (dev->udev->speed == USB_SPEED_HIGH)
924                 fct_flow = 0x211;
925
926         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
927                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
928                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
929
930         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
931
932         /* threshold value should be set before enabling flow */
933         ret = lan78xx_write_reg(dev, FLOW, flow);
934
935         return 0;
936 }
937
938 static int lan78xx_link_reset(struct lan78xx_net *dev)
939 {
940         struct phy_device *phydev = dev->net->phydev;
941         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
942         int ladv, radv, ret;
943         u32 buf;
944
945         /* clear PHY interrupt status */
946         ret = phy_read(phydev, LAN88XX_INT_STS);
947         if (unlikely(ret < 0))
948                 return -EIO;
949
950         /* clear LAN78xx interrupt status */
951         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
952         if (unlikely(ret < 0))
953                 return -EIO;
954
955         phy_read_status(phydev);
956
957         if (!phydev->link && dev->link_on) {
958                 dev->link_on = false;
959
960                 /* reset MAC */
961                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
962                 if (unlikely(ret < 0))
963                         return -EIO;
964                 buf |= MAC_CR_RST_;
965                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
966                 if (unlikely(ret < 0))
967                         return -EIO;
968
969                 phy_mac_interrupt(phydev, 0);
970         } else if (phydev->link && !dev->link_on) {
971                 dev->link_on = true;
972
973                 phy_ethtool_gset(phydev, &ecmd);
974
975                 ret = phy_read(phydev, LAN88XX_INT_STS);
976
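                /* USB3 LPM tuning: at 1 Gbps only U1 is enabled (U2 is
                 * disabled), otherwise both U1 and U2 are enabled,
                 * presumably to bound link-exit latency at gigabit rates.
                 */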
977                 if (dev->udev->speed == USB_SPEED_SUPER) {
978                         if (ethtool_cmd_speed(&ecmd) == 1000) {
979                                 /* disable U2 */
980                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
981                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
982                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
983                                 /* enable U1 */
984                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
985                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
986                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
987                         } else {
988                                 /* enable U1 & U2 */
989                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
990                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
991                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
992                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
993                         }
994                 }
995
996                 ladv = phy_read(phydev, MII_ADVERTISE);
997                 if (ladv < 0)
998                         return ladv;
999
1000                 radv = phy_read(phydev, MII_LPA);
1001                 if (radv < 0)
1002                         return radv;
1003
1004                 netif_dbg(dev, link, dev->net,
1005                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1006                           ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1007
1008                 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1009                 phy_mac_interrupt(phydev, 1);
1010         }
1011
1012         return ret;
1013 }
1014
1015 /* some work can't be done in tasklets, so we use keventd
1016  *
1017  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1018  * but tasklet_schedule() doesn't. Hope the failure is rare.
1019  */
1020 void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1021 {
1022         set_bit(work, &dev->flags);
1023         if (!schedule_delayed_work(&dev->wq, 0))
1024                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1025 }
1026
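/* Interrupt endpoint completion: the device reports status as a single
 * 32-bit little-endian word; only the PHY interrupt bit is handled here,
 * by deferring a link reset to the work queue.
 */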
1027 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1028 {
1029         u32 intdata;
1030
1031         if (urb->actual_length != 4) {
1032                 netdev_warn(dev->net,
1033                             "unexpected urb length %d", urb->actual_length);
1034                 return;
1035         }
1036
1037         memcpy(&intdata, urb->transfer_buffer, 4);
1038         le32_to_cpus(&intdata);
1039
1040         if (intdata & INT_ENP_PHY_INT) {
1041                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1042                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1043         } else
1044                 netdev_warn(dev->net,
1045                             "unexpected interrupt: 0x%08x\n", intdata);
1046 }
1047
1048 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1049 {
1050         return MAX_EEPROM_SIZE;
1051 }
1052
1053 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1054                                       struct ethtool_eeprom *ee, u8 *data)
1055 {
1056         struct lan78xx_net *dev = netdev_priv(netdev);
1057
1058         ee->magic = LAN78XX_EEPROM_MAGIC;
1059
1060         return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1061 }
1062
1063 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1064                                       struct ethtool_eeprom *ee, u8 *data)
1065 {
1066         struct lan78xx_net *dev = netdev_priv(netdev);
1067
1068         /* Allow entire eeprom update only */
1069         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1070             (ee->offset == 0) &&
1071             (ee->len == 512) &&
1072             (data[0] == EEPROM_INDICATOR))
1073                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1074         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1075                  (ee->offset == 0) &&
1076                  (ee->len == 512) &&
1077                  (data[0] == OTP_INDICATOR_1))
1078                 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1079
1080         return -EINVAL;
1081 }
1082
1083 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1084                                 u8 *data)
1085 {
1086         if (stringset == ETH_SS_STATS)
1087                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1088 }
1089
1090 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1091 {
1092         if (sset == ETH_SS_STATS)
1093                 return ARRAY_SIZE(lan78xx_gstrings);
1094         else
1095                 return -EOPNOTSUPP;
1096 }
1097
1098 static void lan78xx_get_stats(struct net_device *netdev,
1099                               struct ethtool_stats *stats, u64 *data)
1100 {
1101         struct lan78xx_net *dev = netdev_priv(netdev);
1102         struct lan78xx_statstage lan78xx_stat;
1103         u32 *p;
1104         int i;
1105
1106         if (usb_autopm_get_interface(dev->intf) < 0)
1107                 return;
1108
1109         if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1110                 p = (u32 *)&lan78xx_stat;
1111                 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1112                         data[i] = p[i];
1113         }
1114
1115         usb_autopm_put_interface(dev->intf);
1116 }
1117
1118 static void lan78xx_get_wol(struct net_device *netdev,
1119                             struct ethtool_wolinfo *wol)
1120 {
1121         struct lan78xx_net *dev = netdev_priv(netdev);
1122         int ret;
1123         u32 buf;
1124         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1125
1126         if (usb_autopm_get_interface(dev->intf) < 0)
1127                 return;
1128
1129         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1130         if (unlikely(ret < 0)) {
1131                 wol->supported = 0;
1132                 wol->wolopts = 0;
1133         } else {
1134                 if (buf & USB_CFG_RMT_WKP_) {
1135                         wol->supported = WAKE_ALL;
1136                         wol->wolopts = pdata->wol;
1137                 } else {
1138                         wol->supported = 0;
1139                         wol->wolopts = 0;
1140                 }
1141         }
1142
1143         usb_autopm_put_interface(dev->intf);
1144 }
1145
1146 static int lan78xx_set_wol(struct net_device *netdev,
1147                            struct ethtool_wolinfo *wol)
1148 {
1149         struct lan78xx_net *dev = netdev_priv(netdev);
1150         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1151         int ret;
1152
1153         ret = usb_autopm_get_interface(dev->intf);
1154         if (ret < 0)
1155                 return ret;
1156
1157         pdata->wol = 0;
1158         if (wol->wolopts & WAKE_UCAST)
1159                 pdata->wol |= WAKE_UCAST;
1160         if (wol->wolopts & WAKE_MCAST)
1161                 pdata->wol |= WAKE_MCAST;
1162         if (wol->wolopts & WAKE_BCAST)
1163                 pdata->wol |= WAKE_BCAST;
1164         if (wol->wolopts & WAKE_MAGIC)
1165                 pdata->wol |= WAKE_MAGIC;
1166         if (wol->wolopts & WAKE_PHY)
1167                 pdata->wol |= WAKE_PHY;
1168         if (wol->wolopts & WAKE_ARP)
1169                 pdata->wol |= WAKE_ARP;
1170
1171         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1172
1173         phy_ethtool_set_wol(netdev->phydev, wol);
1174
1175         usb_autopm_put_interface(dev->intf);
1176
1177         return ret;
1178 }
1179
1180 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1181 {
1182         struct lan78xx_net *dev = netdev_priv(net);
1183         struct phy_device *phydev = net->phydev;
1184         int ret;
1185         u32 buf;
1186
1187         ret = usb_autopm_get_interface(dev->intf);
1188         if (ret < 0)
1189                 return ret;
1190
1191         ret = phy_ethtool_get_eee(phydev, edata);
1192         if (ret < 0)
1193                 goto exit;
1194
1195         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1196         if (buf & MAC_CR_EEE_EN_) {
1197                 edata->eee_enabled = true;
1198                 edata->eee_active = !!(edata->advertised &
1199                                        edata->lp_advertised);
1200                 edata->tx_lpi_enabled = true;
1201                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1202                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1203                 edata->tx_lpi_timer = buf;
1204         } else {
1205                 edata->eee_enabled = false;
1206                 edata->eee_active = false;
1207                 edata->tx_lpi_enabled = false;
1208                 edata->tx_lpi_timer = 0;
1209         }
1210
1211         ret = 0;
1212 exit:
1213         usb_autopm_put_interface(dev->intf);
1214
1215         return ret;
1216 }
1217
1218 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1219 {
1220         struct lan78xx_net *dev = netdev_priv(net);
1221         int ret;
1222         u32 buf;
1223
1224         ret = usb_autopm_get_interface(dev->intf);
1225         if (ret < 0)
1226                 return ret;
1227
1228         if (edata->eee_enabled) {
1229                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1230                 buf |= MAC_CR_EEE_EN_;
1231                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1232
1233                 phy_ethtool_set_eee(net->phydev, edata);
1234
1235                 buf = (u32)edata->tx_lpi_timer;
1236                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1237         } else {
1238                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1239                 buf &= ~MAC_CR_EEE_EN_;
1240                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1241         }
1242
1243         usb_autopm_put_interface(dev->intf);
1244
1245         return 0;
1246 }
1247
1248 static u32 lan78xx_get_link(struct net_device *net)
1249 {
1250         phy_read_status(net->phydev);
1251
1252         return net->phydev->link;
1253 }
1254
1255 int lan78xx_nway_reset(struct net_device *net)
1256 {
1257         return phy_start_aneg(net->phydev);
1258 }
1259
1260 static void lan78xx_get_drvinfo(struct net_device *net,
1261                                 struct ethtool_drvinfo *info)
1262 {
1263         struct lan78xx_net *dev = netdev_priv(net);
1264
1265         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1266         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1267         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1268 }
1269
1270 static u32 lan78xx_get_msglevel(struct net_device *net)
1271 {
1272         struct lan78xx_net *dev = netdev_priv(net);
1273
1274         return dev->msg_enable;
1275 }
1276
1277 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1278 {
1279         struct lan78xx_net *dev = netdev_priv(net);
1280
1281         dev->msg_enable = level;
1282 }
1283
1284 static int lan78xx_get_mdix_status(struct net_device *net)
1285 {
1286         struct phy_device *phydev = net->phydev;
1287         int buf;
1288
1289         phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1290         buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1291         phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1292
1293         return buf;
1294 }
1295
1296 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1297 {
1298         struct lan78xx_net *dev = netdev_priv(net);
1299         struct phy_device *phydev = net->phydev;
1300         int buf;
1301
1302         if (mdix_ctrl == ETH_TP_MDI) {
1303                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1304                           LAN88XX_EXT_PAGE_SPACE_1);
1305                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1306                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1307                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1308                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1309                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1310                           LAN88XX_EXT_PAGE_SPACE_0);
1311         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1312                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1313                           LAN88XX_EXT_PAGE_SPACE_1);
1314                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1315                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1316                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1317                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1318                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1319                           LAN88XX_EXT_PAGE_SPACE_0);
1320         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1321                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1322                           LAN88XX_EXT_PAGE_SPACE_1);
1323                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1324                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1325                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1326                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1327                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1328                           LAN88XX_EXT_PAGE_SPACE_0);
1329         }
1330         dev->mdix_ctrl = mdix_ctrl;
1331 }
1332
1333 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1334 {
1335         struct lan78xx_net *dev = netdev_priv(net);
1336         struct phy_device *phydev = net->phydev;
1337         int ret;
1338         int buf;
1339
1340         ret = usb_autopm_get_interface(dev->intf);
1341         if (ret < 0)
1342                 return ret;
1343
1344         ret = phy_ethtool_gset(phydev, cmd);
1345
1346         buf = lan78xx_get_mdix_status(net);
1347
1348         buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1349         if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1350                 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1351                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1352         } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1353                 cmd->eth_tp_mdix = ETH_TP_MDI;
1354                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1355         } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1356                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1357                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1358         }
1359
1360         usb_autopm_put_interface(dev->intf);
1361
1362         return ret;
1363 }
1364
1365 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1366 {
1367         struct lan78xx_net *dev = netdev_priv(net);
1368         struct phy_device *phydev = net->phydev;
1369         int ret = 0;
1370         int temp;
1371
1372         ret = usb_autopm_get_interface(dev->intf);
1373         if (ret < 0)
1374                 return ret;
1375
1376         if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1377                 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1378         }
1379
1380         /* change speed & duplex */
1381         ret = phy_ethtool_sset(phydev, cmd);
1382
1383         if (!cmd->autoneg) {
1384                 /* force link down */
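                /* Toggling loopback in BMCR briefly drops the link so the
                 * newly forced speed/duplex settings take effect; this is
                 * the presumed intent of the sequence below.
                 */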
1385                 temp = phy_read(phydev, MII_BMCR);
1386                 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1387                 mdelay(1);
1388                 phy_write(phydev, MII_BMCR, temp);
1389         }
1390
1391         usb_autopm_put_interface(dev->intf);
1392
1393         return ret;
1394 }
1395
1396 static void lan78xx_get_pause(struct net_device *net,
1397                               struct ethtool_pauseparam *pause)
1398 {
1399         struct lan78xx_net *dev = netdev_priv(net);
1400         struct phy_device *phydev = net->phydev;
1401         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1402
1403         phy_ethtool_gset(phydev, &ecmd);
1404
1405         pause->autoneg = dev->fc_autoneg;
1406
1407         if (dev->fc_request_control & FLOW_CTRL_TX)
1408                 pause->tx_pause = 1;
1409
1410         if (dev->fc_request_control & FLOW_CTRL_RX)
1411                 pause->rx_pause = 1;
1412 }
1413
1414 static int lan78xx_set_pause(struct net_device *net,
1415                              struct ethtool_pauseparam *pause)
1416 {
1417         struct lan78xx_net *dev = netdev_priv(net);
1418         struct phy_device *phydev = net->phydev;
1419         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1420         int ret;
1421
1422         phy_ethtool_gset(phydev, &ecmd);
1423
1424         if (pause->autoneg && !ecmd.autoneg) {
1425                 ret = -EINVAL;
1426                 goto exit;
1427         }
1428
1429         dev->fc_request_control = 0;
1430         if (pause->rx_pause)
1431                 dev->fc_request_control |= FLOW_CTRL_RX;
1432
1433         if (pause->tx_pause)
1434                 dev->fc_request_control |= FLOW_CTRL_TX;
1435
1436         if (ecmd.autoneg) {
1437                 u32 mii_adv;
1438
1439                 ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1440                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1441                 ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1442                 phy_ethtool_sset(phydev, &ecmd);
1443         }
1444
1445         dev->fc_autoneg = pause->autoneg;
1446
1447         ret = 0;
1448 exit:
1449         return ret;
1450 }
1451
1452 static const struct ethtool_ops lan78xx_ethtool_ops = {
1453         .get_link       = lan78xx_get_link,
1454         .nway_reset     = lan78xx_nway_reset,
1455         .get_drvinfo    = lan78xx_get_drvinfo,
1456         .get_msglevel   = lan78xx_get_msglevel,
1457         .set_msglevel   = lan78xx_set_msglevel,
1458         .get_settings   = lan78xx_get_settings,
1459         .set_settings   = lan78xx_set_settings,
1460         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1461         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1462         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1463         .get_ethtool_stats = lan78xx_get_stats,
1464         .get_sset_count = lan78xx_get_sset_count,
1465         .get_strings    = lan78xx_get_strings,
1466         .get_wol        = lan78xx_get_wol,
1467         .set_wol        = lan78xx_set_wol,
1468         .get_eee        = lan78xx_get_eee,
1469         .set_eee        = lan78xx_set_eee,
1470         .get_pauseparam = lan78xx_get_pause,
1471         .set_pauseparam = lan78xx_set_pause,
1472 };
1473
1474 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1475 {
1476         if (!netif_running(netdev))
1477                 return -EINVAL;
1478
1479         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1480 }
1481
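/* MAC address selection order: use the address already programmed into
 * RX_ADDRL/RX_ADDRH if it is valid, otherwise try EEPROM, then OTP, and
 * finally fall back to a random locally administered address.
 */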
1482 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1483 {
1484         u32 addr_lo, addr_hi;
1485         int ret;
1486         u8 addr[6];
1487
1488         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1489         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1490
1491         addr[0] = addr_lo & 0xFF;
1492         addr[1] = (addr_lo >> 8) & 0xFF;
1493         addr[2] = (addr_lo >> 16) & 0xFF;
1494         addr[3] = (addr_lo >> 24) & 0xFF;
1495         addr[4] = addr_hi & 0xFF;
1496         addr[5] = (addr_hi >> 8) & 0xFF;
1497
1498         if (!is_valid_ether_addr(addr)) {
1499                 /* reading mac address from EEPROM or OTP */
1500                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1501                                          addr) == 0) ||
1502                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1503                                       addr) == 0)) {
1504                         if (is_valid_ether_addr(addr)) {
1505                                 /* eeprom values are valid so use them */
1506                                 netif_dbg(dev, ifup, dev->net,
1507                                           "MAC address read from EEPROM");
1508                         } else {
1509                                 /* generate random MAC */
1510                                 random_ether_addr(addr);
1511                                 netif_dbg(dev, ifup, dev->net,
1512                                           "MAC address set to random addr");
1513                         }
1514
1515                         addr_lo = addr[0] | (addr[1] << 8) |
1516                                   (addr[2] << 16) | (addr[3] << 24);
1517                         addr_hi = addr[4] | (addr[5] << 8);
1518
1519                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1520                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1521                 } else {
1522                         /* generate random MAC */
1523                         random_ether_addr(addr);
1524                         netif_dbg(dev, ifup, dev->net,
1525                                   "MAC address set to random addr");
1526                 }
1527         }
1528
1529         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1530         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1531
1532         ether_addr_copy(dev->net->dev_addr, addr);
1533 }
1534
1535 /* MDIO read and write wrappers for phylib */
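/* Each access waits for the MII controller to go idle, programs MII_ACC
 * with the PHY address, register index and direction, waits again, and
 * then transfers the 16-bit value through MII_DATA.  The whole sequence
 * runs under phy_mutex with the interface held awake via
 * usb_autopm_get_interface().
 */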
1536 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1537 {
1538         struct lan78xx_net *dev = bus->priv;
1539         u32 val, addr;
1540         int ret;
1541
1542         ret = usb_autopm_get_interface(dev->intf);
1543         if (ret < 0)
1544                 return ret;
1545
1546         mutex_lock(&dev->phy_mutex);
1547
1548         /* confirm MII not busy */
1549         ret = lan78xx_phy_wait_not_busy(dev);
1550         if (ret < 0)
1551                 goto done;
1552
1553         /* set the address, index & direction (read from PHY) */
1554         addr = mii_access(phy_id, idx, MII_READ);
1555         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1556
1557         ret = lan78xx_phy_wait_not_busy(dev);
1558         if (ret < 0)
1559                 goto done;
1560
1561         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1562
1563         ret = (int)(val & 0xFFFF);
1564
1565 done:
1566         mutex_unlock(&dev->phy_mutex);
1567         usb_autopm_put_interface(dev->intf);
1568         return ret;
1569 }
1570
1571 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1572                                  u16 regval)
1573 {
1574         struct lan78xx_net *dev = bus->priv;
1575         u32 val, addr;
1576         int ret;
1577
1578         ret = usb_autopm_get_interface(dev->intf);
1579         if (ret < 0)
1580                 return ret;
1581
1582         mutex_lock(&dev->phy_mutex);
1583
1584         /* confirm MII not busy */
1585         ret = lan78xx_phy_wait_not_busy(dev);
1586         if (ret < 0)
1587                 goto done;
1588
1589         val = (u32)regval;
1590         ret = lan78xx_write_reg(dev, MII_DATA, val);
1591
1592         /* set the address, index & direction (write to PHY) */
1593         addr = mii_access(phy_id, idx, MII_WRITE);
1594         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1595
1596         ret = lan78xx_phy_wait_not_busy(dev);
1597         if (ret < 0)
1598                 goto done;
1599
1600 done:
1601         mutex_unlock(&dev->phy_mutex);
1602         usb_autopm_put_interface(dev->intf);
1603         return 0;
1604 }
1605
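/* Allocate and register the MDIO bus backing the internal PHY.  For
 * LAN7800/LAN7850 the phy_mask is restricted so that only address 1
 * (the internal PHY) is probed.
 */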
1606 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1607 {
1608         int ret;
1609
1610         dev->mdiobus = mdiobus_alloc();
1611         if (!dev->mdiobus) {
1612                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1613                 return -ENOMEM;
1614         }
1615
1616         dev->mdiobus->priv = (void *)dev;
1617         dev->mdiobus->read = lan78xx_mdiobus_read;
1618         dev->mdiobus->write = lan78xx_mdiobus_write;
1619         dev->mdiobus->name = "lan78xx-mdiobus";
1620
1621         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1622                  dev->udev->bus->busnum, dev->udev->devnum);
1623
1624         switch (dev->chipid) {
1625         case ID_REV_CHIP_ID_7800_:
1626         case ID_REV_CHIP_ID_7850_:
1627                 /* set to internal PHY id */
1628                 dev->mdiobus->phy_mask = ~(1 << 1);
1629                 break;
1630         }
1631
1632         ret = mdiobus_register(dev->mdiobus);
1633         if (ret) {
1634                 netdev_err(dev->net, "can't register MDIO bus\n");
1635                 goto exit1;
1636         }
1637
1638         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1639         return 0;
1640 exit1:
1641         mdiobus_free(dev->mdiobus);
1642         return ret;
1643 }
1644
1645 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1646 {
1647         mdiobus_unregister(dev->mdiobus);
1648         mdiobus_free(dev->mdiobus);
1649 }
1650
1651 static void lan78xx_link_status_change(struct net_device *net)
1652 {
1653         /* nothing to do */
1654 }
1655
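/* Attach to the first PHY found on the MDIO bus, enable its link-change
 * interrupt sources, and let the driver's own interrupt endpoint handle
 * them (phylib sees PHY_IGNORE_INTERRUPT).  Also selects auto-MDIX,
 * drops unsupported 1000BASE-T half duplex, and advertises pause frames
 * before starting the PHY.
 */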
1656 static int lan78xx_phy_init(struct lan78xx_net *dev)
1657 {
1658         int ret;
1659         u32 mii_adv;
1660         struct phy_device *phydev = dev->net->phydev;
1661
1662         phydev = phy_find_first(dev->mdiobus);
1663         if (!phydev) {
1664                 netdev_err(dev->net, "no PHY found\n");
1665                 return -EIO;
1666         }
1667
1668         /* Enable PHY interrupts.
1669          * We handle our own interrupt
1670          */
1671         ret = phy_read(phydev, LAN88XX_INT_STS);
1672         ret = phy_write(phydev, LAN88XX_INT_MASK,
1673                         LAN88XX_INT_MASK_MDINTPIN_EN_ |
1674                         LAN88XX_INT_MASK_LINK_CHANGE_);
1675
1676         phydev->irq = PHY_IGNORE_INTERRUPT;
1677
1678         ret = phy_connect_direct(dev->net, phydev,
1679                                  lan78xx_link_status_change,
1680                                  PHY_INTERFACE_MODE_GMII);
1681         if (ret) {
1682                 netdev_err(dev->net, "can't attach PHY to %s\n",
1683                            dev->mdiobus->id);
1684                 return -EIO;
1685         }
1686
1687         /* set to AUTOMDIX */
1688         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1689
1690         /* MAC doesn't support 1000T Half */
1691         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1692
1693         /* support both flow controls */
1694         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1695         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1696         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1697         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1698
1699         genphy_config_aneg(phydev);
1700
1701         dev->fc_autoneg = phydev->autoneg;
1702
1703         phy_start(phydev);
1704
1705         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1706
1707         return 0;
1708 }
1709
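/* Update the maximum RX frame size in MAC_RX.  The receiver is
 * temporarily disabled while the size field (frame length plus 4 bytes
 * of FCS) is rewritten, then re-enabled if it was running.
 */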
1710 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1711 {
1712         int ret = 0;
1713         u32 buf;
1714         bool rxenabled;
1715
1716         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1717
1718         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1719
1720         if (rxenabled) {
1721                 buf &= ~MAC_RX_RXEN_;
1722                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1723         }
1724
1725         /* add 4 to size for FCS */
1726         buf &= ~MAC_RX_MAX_SIZE_MASK_;
1727         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1728
1729         ret = lan78xx_write_reg(dev, MAC_RX, buf);
1730
1731         if (rxenabled) {
1732                 buf |= MAC_RX_RXEN_;
1733                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1734         }
1735
1736         return 0;
1737 }
1738
1739 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1740 {
1741         struct sk_buff *skb;
1742         unsigned long flags;
1743         int count = 0;
1744
1745         spin_lock_irqsave(&q->lock, flags);
1746         while (!skb_queue_empty(q)) {
1747                 struct skb_data *entry;
1748                 struct urb *urb;
1749                 int ret;
1750
1751                 skb_queue_walk(q, skb) {
1752                         entry = (struct skb_data *)skb->cb;
1753                         if (entry->state != unlink_start)
1754                                 goto found;
1755                 }
1756                 break;
1757 found:
1758                 entry->state = unlink_start;
1759                 urb = entry->urb;
1760
1761                 /* Take a reference on the URB so it cannot be freed
1762                  * while usb_unlink_urb() runs; otherwise a
1763                  * use-after-free is possible inside usb_unlink_urb(),
1764                  * which always races with the .complete handler
1765                  * (including defer_bh).
1766                  */
1767                 usb_get_urb(urb);
1768                 spin_unlock_irqrestore(&q->lock, flags);
1769                 /* during some PM-driven resume scenarios,
1770                  * these (async) unlinks complete immediately
1771                  */
1772                 ret = usb_unlink_urb(urb);
1773                 if (ret != -EINPROGRESS && ret != 0)
1774                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1775                 else
1776                         count++;
1777                 usb_put_urb(urb);
1778                 spin_lock_irqsave(&q->lock, flags);
1779         }
1780         spin_unlock_irqrestore(&q->lock, flags);
1781         return count;
1782 }
1783
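/* MTU changes are bounded by MAX_SINGLE_PACKET_SIZE and must not make
 * the USB-level frame length an exact multiple of the bulk endpoint
 * packet size (that would require an extra zero-length packet per
 * transfer).  The MAC maximum frame length and, if needed, the RX URB
 * size are updated to match.
 */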
1784 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1785 {
1786         struct lan78xx_net *dev = netdev_priv(netdev);
1787         int ll_mtu = new_mtu + netdev->hard_header_len;
1788         int old_hard_mtu = dev->hard_mtu;
1789         int old_rx_urb_size = dev->rx_urb_size;
1790         int ret;
1791
1792         if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1793                 return -EINVAL;
1794
1795         if (new_mtu <= 0)
1796                 return -EINVAL;
1797         /* no second zero-length packet read wanted after mtu-sized packets */
1798         if ((ll_mtu % dev->maxpacket) == 0)
1799                 return -EDOM;
1800
1801         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1802
1803         netdev->mtu = new_mtu;
1804
1805         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1806         if (dev->rx_urb_size == old_hard_mtu) {
1807                 dev->rx_urb_size = dev->hard_mtu;
1808                 if (dev->rx_urb_size > old_rx_urb_size) {
1809                         if (netif_running(dev->net)) {
1810                                 unlink_urbs(dev, &dev->rxq);
1811                                 tasklet_schedule(&dev->bh);
1812                         }
1813                 }
1814         }
1815
1816         return 0;
1817 }
1818
1819 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1820 {
1821         struct lan78xx_net *dev = netdev_priv(netdev);
1822         struct sockaddr *addr = p;
1823         u32 addr_lo, addr_hi;
1824         int ret;
1825
1826         if (netif_running(netdev))
1827                 return -EBUSY;
1828
1829         if (!is_valid_ether_addr(addr->sa_data))
1830                 return -EADDRNOTAVAIL;
1831
1832         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1833
1834         addr_lo = netdev->dev_addr[0] |
1835                   netdev->dev_addr[1] << 8 |
1836                   netdev->dev_addr[2] << 16 |
1837                   netdev->dev_addr[3] << 24;
1838         addr_hi = netdev->dev_addr[4] |
1839                   netdev->dev_addr[5] << 8;
1840
1841         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1842         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1843
1844         return 0;
1845 }
1846
1847 /* Enable or disable Rx checksum offload engine */
1848 static int lan78xx_set_features(struct net_device *netdev,
1849                                 netdev_features_t features)
1850 {
1851         struct lan78xx_net *dev = netdev_priv(netdev);
1852         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1853         unsigned long flags;
1854         int ret;
1855
1856         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1857
1858         if (features & NETIF_F_RXCSUM) {
1859                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1860                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1861         } else {
1862                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1863                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1864         }
1865
1866         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1867                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1868         else
1869                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1870
1871         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1872
1873         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1874
1875         return 0;
1876 }
1877
1878 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1879 {
1880         struct lan78xx_priv *pdata =
1881                         container_of(param, struct lan78xx_priv, set_vlan);
1882         struct lan78xx_net *dev = pdata->dev;
1883
1884         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1885                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1886 }
1887
1888 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1889                                    __be16 proto, u16 vid)
1890 {
1891         struct lan78xx_net *dev = netdev_priv(netdev);
1892         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1893         u16 vid_bit_index;
1894         u16 vid_dword_index;
1895
1896         vid_dword_index = (vid >> 5) & 0x7F;
1897         vid_bit_index = vid & 0x1F;
1898
1899         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1900
1901         /* defer register writes to a sleepable context */
1902         schedule_work(&pdata->set_vlan);
1903
1904         return 0;
1905 }
1906
1907 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1908                                     __be16 proto, u16 vid)
1909 {
1910         struct lan78xx_net *dev = netdev_priv(netdev);
1911         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1912         u16 vid_bit_index;
1913         u16 vid_dword_index;
1914
1915         vid_dword_index = (vid >> 5) & 0x7F;
1916         vid_bit_index = vid & 0x1F;
1917
1918         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1919
1920         /* defer register writes to a sleepable context */
1921         schedule_work(&pdata->set_vlan);
1922
1923         return 0;
1924 }
1925
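/* Initialise the LTM (Latency Tolerance Messaging) registers.  They
 * default to zero unless LTM is enabled in USB_CFG1 and the EEPROM or
 * OTP provides a 24-byte override table, located via the two-byte
 * descriptor read from offset 0x3F.
 */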
1926 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1927 {
1928         int ret;
1929         u32 buf;
1930         u32 regs[6] = { 0 };
1931
1932         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1933         if (buf & USB_CFG1_LTM_ENABLE_) {
1934                 u8 temp[2];
1935                 /* Get values from EEPROM first */
1936                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1937                         if (temp[0] == 24) {
1938                                 ret = lan78xx_read_raw_eeprom(dev,
1939                                                               temp[1] * 2,
1940                                                               24,
1941                                                               (u8 *)regs);
1942                                 if (ret < 0)
1943                                         return;
1944                         }
1945                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1946                         if (temp[0] == 24) {
1947                                 ret = lan78xx_read_raw_otp(dev,
1948                                                            temp[1] * 2,
1949                                                            24,
1950                                                            (u8 *)regs);
1951                                 if (ret < 0)
1952                                         return;
1953                         }
1954                 }
1955         }
1956
1957         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1958         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1959         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1960         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1961         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1962         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1963 }
1964
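/* Full device (re)initialisation: issue a Lite Reset, reprogram the MAC
 * address, size the bulk-in burst and queue parameters for the
 * negotiated USB speed, set FIFO thresholds and flow control, restore
 * checksum offload and multicast filters, reset the PHY, and finally
 * enable the TX and RX datapaths.
 */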
1965 static int lan78xx_reset(struct lan78xx_net *dev)
1966 {
1967         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1968         u32 buf;
1969         int ret = 0;
1970         unsigned long timeout;
1971
1972         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1973         buf |= HW_CFG_LRST_;
1974         ret = lan78xx_write_reg(dev, HW_CFG, buf);
1975
1976         timeout = jiffies + HZ;
1977         do {
1978                 mdelay(1);
1979                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1980                 if (time_after(jiffies, timeout)) {
1981                         netdev_warn(dev->net,
1982                                     "timeout on completion of LiteReset");
1983                         return -EIO;
1984                 }
1985         } while (buf & HW_CFG_LRST_);
1986
1987         lan78xx_init_mac_address(dev);
1988
1989         /* save DEVID for later usage */
1990         ret = lan78xx_read_reg(dev, ID_REV, &buf);
1991         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
1992         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
1993
1994         /* Respond to the IN token with a NAK */
1995         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1996         buf |= USB_CFG_BIR_;
1997         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1998
1999         /* Init LTM */
2000         lan78xx_init_ltm(dev);
2001
2002         dev->net->hard_header_len += TX_OVERHEAD;
2003         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2004
2005         if (dev->udev->speed == USB_SPEED_SUPER) {
2006                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2007                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2008                 dev->rx_qlen = 4;
2009                 dev->tx_qlen = 4;
2010         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2011                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2012                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2013                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2014                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2015         } else {
2016                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2017                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2018                 dev->rx_qlen = 4;
2019         }
2020
2021         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2022         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2023
2024         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2025         buf |= HW_CFG_MEF_;
2026         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2027
2028         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2029         buf |= USB_CFG_BCE_;
2030         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2031
2032         /* set FIFO sizes */
2033         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2034         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2035
2036         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2037         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2038
2039         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2040         ret = lan78xx_write_reg(dev, FLOW, 0);
2041         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2042
2043         /* Don't need rfe_ctl_lock during initialisation */
2044         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2045         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2046         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2047
2048         /* Enable or disable checksum offload engines */
2049         lan78xx_set_features(dev->net, dev->net->features);
2050
2051         lan78xx_set_multicast(dev->net);
2052
2053         /* reset PHY */
2054         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2055         buf |= PMT_CTL_PHY_RST_;
2056         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2057
2058         timeout = jiffies + HZ;
2059         do {
2060                 mdelay(1);
2061                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2062                 if (time_after(jiffies, timeout)) {
2063                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2064                         return -EIO;
2065                 }
2066         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2067
2068         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2069         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2070         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2071
2072         /* enable PHY interrupts */
2073         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2074         buf |= INT_ENP_PHY_INT;
2075         ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2076
2077         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2078         buf |= MAC_TX_TXEN_;
2079         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2080
2081         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2082         buf |= FCT_TX_CTL_EN_;
2083         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2084
2085         ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2086
2087         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2088         buf |= MAC_RX_RXEN_;
2089         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2090
2091         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2092         buf |= FCT_RX_CTL_EN_;
2093         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2094
2095         return 0;
2096 }
2097
2098 static int lan78xx_open(struct net_device *net)
2099 {
2100         struct lan78xx_net *dev = netdev_priv(net);
2101         int ret;
2102
2103         ret = usb_autopm_get_interface(dev->intf);
2104         if (ret < 0)
2105                 goto out;
2106
2107         ret = lan78xx_reset(dev);
2108         if (ret < 0)
2109                 goto done;
2110
2111         ret = lan78xx_phy_init(dev);
2112         if (ret < 0)
2113                 goto done;
2114
2115         /* for Link Check */
2116         if (dev->urb_intr) {
2117                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2118                 if (ret < 0) {
2119                         netif_err(dev, ifup, dev->net,
2120                                   "intr submit %d\n", ret);
2121                         goto done;
2122                 }
2123         }
2124
2125         set_bit(EVENT_DEV_OPEN, &dev->flags);
2126
2127         netif_start_queue(net);
2128
2129         dev->link_on = false;
2130
2131         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2132 done:
2133         usb_autopm_put_interface(dev->intf);
2134
2135 out:
2136         return ret;
2137 }
2138
2139 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2140 {
2141         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2142         DECLARE_WAITQUEUE(wait, current);
2143         int temp;
2144
2145         /* ensure there are no more active urbs */
2146         add_wait_queue(&unlink_wakeup, &wait);
2147         set_current_state(TASK_UNINTERRUPTIBLE);
2148         dev->wait = &unlink_wakeup;
2149         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2150
2151         /* maybe wait for deletions to finish. */
2152         while (!skb_queue_empty(&dev->rxq) &&
2153                !skb_queue_empty(&dev->txq) &&
2154                !skb_queue_empty(&dev->done)) {
2155                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2156                 set_current_state(TASK_UNINTERRUPTIBLE);
2157                 netif_dbg(dev, ifdown, dev->net,
2158                           "waited for %d urb completions\n", temp);
2159         }
2160         set_current_state(TASK_RUNNING);
2161         dev->wait = NULL;
2162         remove_wait_queue(&unlink_wakeup, &wait);
2163 }
2164
2165 int lan78xx_stop(struct net_device *net)
2166 {
2167         struct lan78xx_net              *dev = netdev_priv(net);
2168
2169         phy_stop(net->phydev);
2170         phy_disconnect(net->phydev);
2171         net->phydev = NULL;
2172
2173         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2174         netif_stop_queue(net);
2175
2176         netif_info(dev, ifdown, dev->net,
2177                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2178                    net->stats.rx_packets, net->stats.tx_packets,
2179                    net->stats.rx_errors, net->stats.tx_errors);
2180
2181         lan78xx_terminate_urbs(dev);
2182
2183         usb_kill_urb(dev->urb_intr);
2184
2185         skb_queue_purge(&dev->rxq_pause);
2186
2187         /* deferred work (task, timer, softirq) must also stop.
2188          * can't flush_scheduled_work() until we drop rtnl (later),
2189          * else workers could deadlock; so make workers a NOP.
2190          */
2191         dev->flags = 0;
2192         cancel_delayed_work_sync(&dev->wq);
2193         tasklet_kill(&dev->bh);
2194
2195         usb_autopm_put_interface(dev->intf);
2196
2197         return 0;
2198 }
2199
2200 static int lan78xx_linearize(struct sk_buff *skb)
2201 {
2202         return skb_linearize(skb);
2203 }
2204
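/* Prepend the two little-endian TX command words to a frame: TX_CMD_A
 * carries the length, FCS insertion and checksum/LSO flags, TX_CMD_B
 * carries the MSS and VLAN tag.  The skb is copied if it lacks the
 * 8 bytes of headroom and is linearized before the words are added.
 */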
2205 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2206                                        struct sk_buff *skb, gfp_t flags)
2207 {
2208         u32 tx_cmd_a, tx_cmd_b;
2209
2210         if (skb_headroom(skb) < TX_OVERHEAD) {
2211                 struct sk_buff *skb2;
2212
2213                 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2214                 dev_kfree_skb_any(skb);
2215                 skb = skb2;
2216                 if (!skb)
2217                         return NULL;
2218         }
2219
2220         if (lan78xx_linearize(skb) < 0)
2221                 return NULL;
2222
2223         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2224
2225         if (skb->ip_summed == CHECKSUM_PARTIAL)
2226                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2227
2228         tx_cmd_b = 0;
2229         if (skb_is_gso(skb)) {
2230                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2231
2232                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2233
2234                 tx_cmd_a |= TX_CMD_A_LSO_;
2235         }
2236
2237         if (skb_vlan_tag_present(skb)) {
2238                 tx_cmd_a |= TX_CMD_A_IVTG_;
2239                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2240         }
2241
2242         skb_push(skb, 4);
2243         cpu_to_le32s(&tx_cmd_b);
2244         memcpy(skb->data, &tx_cmd_b, 4);
2245
2246         skb_push(skb, 4);
2247         cpu_to_le32s(&tx_cmd_a);
2248         memcpy(skb->data, &tx_cmd_a, 4);
2249
2250         return skb;
2251 }
2252
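/* Move a completed skb from its active queue to dev->done and record
 * its new state, scheduling the BH tasklet when the done list was
 * previously empty.  The old state is returned so callers can detect a
 * racing unlink.
 */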
2253 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2254                                struct sk_buff_head *list, enum skb_state state)
2255 {
2256         unsigned long flags;
2257         enum skb_state old_state;
2258         struct skb_data *entry = (struct skb_data *)skb->cb;
2259
2260         spin_lock_irqsave(&list->lock, flags);
2261         old_state = entry->state;
2262         entry->state = state;
2263
2264         __skb_unlink(skb, list);
2265         spin_unlock(&list->lock);
2266         spin_lock(&dev->done.lock);
2267
2268         __skb_queue_tail(&dev->done, skb);
2269         if (skb_queue_len(&dev->done) == 1)
2270                 tasklet_schedule(&dev->bh);
2271         spin_unlock_irqrestore(&dev->done.lock, flags);
2272
2273         return old_state;
2274 }
2275
2276 static void tx_complete(struct urb *urb)
2277 {
2278         struct sk_buff *skb = (struct sk_buff *)urb->context;
2279         struct skb_data *entry = (struct skb_data *)skb->cb;
2280         struct lan78xx_net *dev = entry->dev;
2281
2282         if (urb->status == 0) {
2283                 dev->net->stats.tx_packets++;
2284                 dev->net->stats.tx_bytes += entry->length;
2285         } else {
2286                 dev->net->stats.tx_errors++;
2287
2288                 switch (urb->status) {
2289                 case -EPIPE:
2290                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2291                         break;
2292
2293                 /* software-driven interface shutdown */
2294                 case -ECONNRESET:
2295                 case -ESHUTDOWN:
2296                         break;
2297
2298                 case -EPROTO:
2299                 case -ETIME:
2300                 case -EILSEQ:
2301                         netif_stop_queue(dev->net);
2302                         break;
2303                 default:
2304                         netif_dbg(dev, tx_err, dev->net,
2305                                   "tx err %d\n", entry->urb->status);
2306                         break;
2307                 }
2308         }
2309
2310         usb_autopm_put_interface_async(dev->intf);
2311
2312         defer_bh(dev, skb, &dev->txq, tx_done);
2313 }
2314
2315 static void lan78xx_queue_skb(struct sk_buff_head *list,
2316                               struct sk_buff *newsk, enum skb_state state)
2317 {
2318         struct skb_data *entry = (struct skb_data *)newsk->cb;
2319
2320         __skb_queue_tail(list, newsk);
2321         entry->state = state;
2322 }
2323
2324 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2325 {
2326         struct lan78xx_net *dev = netdev_priv(net);
2327         struct sk_buff *skb2 = NULL;
2328
2329         if (skb) {
2330                 skb_tx_timestamp(skb);
2331                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2332         }
2333
2334         if (skb2) {
2335                 skb_queue_tail(&dev->txq_pend, skb2);
2336
2337                 /* throttle the TX path when running slower than SuperSpeed USB */
2338                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2339                     (skb_queue_len(&dev->txq_pend) > 10))
2340                         netif_stop_queue(net);
2341         } else {
2342                 netif_dbg(dev, tx_err, dev->net,
2343                           "lan78xx_tx_prep return NULL\n");
2344                 dev->net->stats.tx_errors++;
2345                 dev->net->stats.tx_dropped++;
2346         }
2347
2348         tasklet_schedule(&dev->bh);
2349
2350         return NETDEV_TX_OK;
2351 }
2352
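/* Scan the interface altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints and derive the corresponding pipes.
 * Fails with -EINVAL if no altsetting offers both bulk endpoints.
 */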
2353 int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2354 {
2355         int tmp;
2356         struct usb_host_interface *alt = NULL;
2357         struct usb_host_endpoint *in = NULL, *out = NULL;
2358         struct usb_host_endpoint *status = NULL;
2359
2360         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2361                 unsigned ep;
2362
2363                 in = NULL;
2364                 out = NULL;
2365                 status = NULL;
2366                 alt = intf->altsetting + tmp;
2367
2368                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2369                         struct usb_host_endpoint *e;
2370                         int intr = 0;
2371
2372                         e = alt->endpoint + ep;
2373                         switch (e->desc.bmAttributes) {
2374                         case USB_ENDPOINT_XFER_INT:
2375                                 if (!usb_endpoint_dir_in(&e->desc))
2376                                         continue;
2377                                 intr = 1;
2378                                 /* FALLTHROUGH */
2379                         case USB_ENDPOINT_XFER_BULK:
2380                                 break;
2381                         default:
2382                                 continue;
2383                         }
2384                         if (usb_endpoint_dir_in(&e->desc)) {
2385                                 if (!intr && !in)
2386                                         in = e;
2387                                 else if (intr && !status)
2388                                         status = e;
2389                         } else {
2390                                 if (!out)
2391                                         out = e;
2392                         }
2393                 }
2394                 if (in && out)
2395                         break;
2396         }
2397         if (!alt || !in || !out)
2398                 return -EINVAL;
2399
2400         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2401                                        in->desc.bEndpointAddress &
2402                                        USB_ENDPOINT_NUMBER_MASK);
2403         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2404                                         out->desc.bEndpointAddress &
2405                                         USB_ENDPOINT_NUMBER_MASK);
2406         dev->ep_intr = status;
2407
2408         return 0;
2409 }
2410
2411 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2412 {
2413         struct lan78xx_priv *pdata = NULL;
2414         int ret;
2415         int i;
2416
2417         ret = lan78xx_get_endpoints(dev, intf);
2418
2419         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2420
2421         pdata = (struct lan78xx_priv *)(dev->data[0]);
2422         if (!pdata) {
2423                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2424                 return -ENOMEM;
2425         }
2426
2427         pdata->dev = dev;
2428
2429         spin_lock_init(&pdata->rfe_ctl_lock);
2430         mutex_init(&pdata->dataport_mutex);
2431
2432         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2433
2434         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2435                 pdata->vlan_table[i] = 0;
2436
2437         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2438
2439         dev->net->features = 0;
2440
2441         if (DEFAULT_TX_CSUM_ENABLE)
2442                 dev->net->features |= NETIF_F_HW_CSUM;
2443
2444         if (DEFAULT_RX_CSUM_ENABLE)
2445                 dev->net->features |= NETIF_F_RXCSUM;
2446
2447         if (DEFAULT_TSO_CSUM_ENABLE)
2448                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2449
2450         dev->net->hw_features = dev->net->features;
2451
2452         /* Init all registers */
2453         ret = lan78xx_reset(dev);
2454
2455         lan78xx_mdio_init(dev);
2456
2457         dev->net->flags |= IFF_MULTICAST;
2458
2459         pdata->wol = WAKE_MAGIC;
2460
2461         return 0;
2462 }
2463
2464 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2465 {
2466         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2467
2468         lan78xx_remove_mdio(dev);
2469
2470         if (pdata) {
2471                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2472                 kfree(pdata);
2473                 pdata = NULL;
2474                 dev->data[0] = 0;
2475         }
2476 }
2477
2478 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2479                                     struct sk_buff *skb,
2480                                     u32 rx_cmd_a, u32 rx_cmd_b)
2481 {
2482         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2483             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2484                 skb->ip_summed = CHECKSUM_NONE;
2485         } else {
2486                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2487                 skb->ip_summed = CHECKSUM_COMPLETE;
2488         }
2489 }
2490
2491 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2492 {
2493         int             status;
2494
2495         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2496                 skb_queue_tail(&dev->rxq_pause, skb);
2497                 return;
2498         }
2499
2500         skb->protocol = eth_type_trans(skb, dev->net);
2501         dev->net->stats.rx_packets++;
2502         dev->net->stats.rx_bytes += skb->len;
2503
2504         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2505                   skb->len + sizeof(struct ethhdr), skb->protocol);
2506         memset(skb->cb, 0, sizeof(struct skb_data));
2507
2508         if (skb_defer_rx_timestamp(skb))
2509                 return;
2510
2511         status = netif_rx(skb);
2512         if (status != NET_RX_SUCCESS)
2513                 netif_dbg(dev, rx_err, dev->net,
2514                           "netif_rx status %d\n", status);
2515 }
2516
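/* Split a received bulk-in buffer into individual frames.  Each frame
 * is preceded by the RX command words A/B/C; the length comes from
 * RX_CMD_A, the FCS is trimmed, and frames other than the last one in
 * the batch are cloned and passed up individually.  Padding keeps each
 * following frame 4-byte aligned.
 */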
2517 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2518 {
2519         if (skb->len < dev->net->hard_header_len)
2520                 return 0;
2521
2522         while (skb->len > 0) {
2523                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2524                 u16 rx_cmd_c;
2525                 struct sk_buff *skb2;
2526                 unsigned char *packet;
2527
2528                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2529                 le32_to_cpus(&rx_cmd_a);
2530                 skb_pull(skb, sizeof(rx_cmd_a));
2531
2532                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2533                 le32_to_cpus(&rx_cmd_b);
2534                 skb_pull(skb, sizeof(rx_cmd_b));
2535
2536                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2537                 le16_to_cpus(&rx_cmd_c);
2538                 skb_pull(skb, sizeof(rx_cmd_c));
2539
2540                 packet = skb->data;
2541
2542                 /* get the packet length */
2543                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2544                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2545
2546                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2547                         netif_dbg(dev, rx_err, dev->net,
2548                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2549                 } else {
2550                         /* last frame in this batch */
2551                         if (skb->len == size) {
2552                                 lan78xx_rx_csum_offload(dev, skb,
2553                                                         rx_cmd_a, rx_cmd_b);
2554
2555                                 skb_trim(skb, skb->len - 4); /* remove fcs */
2556                                 skb->truesize = size + sizeof(struct sk_buff);
2557
2558                                 return 1;
2559                         }
2560
2561                         skb2 = skb_clone(skb, GFP_ATOMIC);
2562                         if (unlikely(!skb2)) {
2563                                 netdev_warn(dev->net, "Error allocating skb");
2564                                 return 0;
2565                         }
2566
2567                         skb2->len = size;
2568                         skb2->data = packet;
2569                         skb_set_tail_pointer(skb2, size);
2570
2571                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2572
2573                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
2574                         skb2->truesize = size + sizeof(struct sk_buff);
2575
2576                         lan78xx_skb_return(dev, skb2);
2577                 }
2578
2579                 skb_pull(skb, size);
2580
2581                 /* padding bytes before the next frame starts */
2582                 if (skb->len)
2583                         skb_pull(skb, align_count);
2584         }
2585
2586         return 1;
2587 }
2588
2589 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2590 {
2591         if (!lan78xx_rx(dev, skb)) {
2592                 dev->net->stats.rx_errors++;
2593                 goto done;
2594         }
2595
2596         if (skb->len) {
2597                 lan78xx_skb_return(dev, skb);
2598                 return;
2599         }
2600
2601         netif_dbg(dev, rx_err, dev->net, "drop\n");
2602         dev->net->stats.rx_errors++;
2603 done:
2604         skb_queue_tail(&dev->done, skb);
2605 }
2606
2607 static void rx_complete(struct urb *urb);
2608
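/* Allocate an skb for a bulk-in URB and submit it, provided the
 * interface is up and neither halted nor asleep.  On any failure the
 * skb and URB are released and an error is returned so the caller can
 * retry later.
 */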
2609 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2610 {
2611         struct sk_buff *skb;
2612         struct skb_data *entry;
2613         unsigned long lockflags;
2614         size_t size = dev->rx_urb_size;
2615         int ret = 0;
2616
2617         skb = netdev_alloc_skb_ip_align(dev->net, size);
2618         if (!skb) {
2619                 usb_free_urb(urb);
2620                 return -ENOMEM;
2621         }
2622
2623         entry = (struct skb_data *)skb->cb;
2624         entry->urb = urb;
2625         entry->dev = dev;
2626         entry->length = 0;
2627
2628         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2629                           skb->data, size, rx_complete, skb);
2630
2631         spin_lock_irqsave(&dev->rxq.lock, lockflags);
2632
2633         if (netif_device_present(dev->net) &&
2634             netif_running(dev->net) &&
2635             !test_bit(EVENT_RX_HALT, &dev->flags) &&
2636             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2637                 ret = usb_submit_urb(urb, GFP_ATOMIC);
2638                 switch (ret) {
2639                 case 0:
2640                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2641                         break;
2642                 case -EPIPE:
2643                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2644                         break;
2645                 case -ENODEV:
2646                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
2647                         netif_device_detach(dev->net);
2648                         break;
2649                 case -EHOSTUNREACH:
2650                         ret = -ENOLINK;
2651                         break;
2652                 default:
2653                         netif_dbg(dev, rx_err, dev->net,
2654                                   "rx submit, %d\n", ret);
2655                         tasklet_schedule(&dev->bh);
2656                 }
2657         } else {
2658                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2659                 ret = -ENOLINK;
2660         }
2661         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2662         if (ret) {
2663                 dev_kfree_skb_any(skb);
2664                 usb_free_urb(urb);
2665         }
2666         return ret;
2667 }
2668
2669 static void rx_complete(struct urb *urb)
2670 {
2671         struct sk_buff  *skb = (struct sk_buff *)urb->context;
2672         struct skb_data *entry = (struct skb_data *)skb->cb;
2673         struct lan78xx_net *dev = entry->dev;
2674         int urb_status = urb->status;
2675         enum skb_state state;
2676
2677         skb_put(skb, urb->actual_length);
2678         state = rx_done;
2679         entry->urb = NULL;
2680
2681         switch (urb_status) {
2682         case 0:
2683                 if (skb->len < dev->net->hard_header_len) {
2684                         state = rx_cleanup;
2685                         dev->net->stats.rx_errors++;
2686                         dev->net->stats.rx_length_errors++;
2687                         netif_dbg(dev, rx_err, dev->net,
2688                                   "rx length %d\n", skb->len);
2689                 }
2690                 usb_mark_last_busy(dev->udev);
2691                 break;
2692         case -EPIPE:
2693                 dev->net->stats.rx_errors++;
2694                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2695                 /* FALLTHROUGH */
2696         case -ECONNRESET:                               /* async unlink */
2697         case -ESHUTDOWN:                                /* hardware gone */
2698                 netif_dbg(dev, ifdown, dev->net,
2699                           "rx shutdown, code %d\n", urb_status);
2700                 state = rx_cleanup;
2701                 entry->urb = urb;
2702                 urb = NULL;
2703                 break;
2704         case -EPROTO:
2705         case -ETIME:
2706         case -EILSEQ:
2707                 dev->net->stats.rx_errors++;
2708                 state = rx_cleanup;
2709                 entry->urb = urb;
2710                 urb = NULL;
2711                 break;
2712
2713         /* data overrun ... flush fifo? */
2714         case -EOVERFLOW:
2715                 dev->net->stats.rx_over_errors++;
2716                 /* FALLTHROUGH */
2717
2718         default:
2719                 state = rx_cleanup;
2720                 dev->net->stats.rx_errors++;
2721                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2722                 break;
2723         }
2724
2725         state = defer_bh(dev, skb, &dev->rxq, state);
2726
2727         if (urb) {
2728                 if (netif_running(dev->net) &&
2729                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
2730                     state != unlink_start) {
2731                         rx_submit(dev, urb, GFP_ATOMIC);
2732                         return;
2733                 }
2734                 usb_free_urb(urb);
2735         }
2736         netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2737 }
2738
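/* Bottom-half TX path: coalesce pending skbs (each padded to a 4-byte
 * boundary, up to MAX_SINGLE_PACKET_SIZE in total) into one bulk-out
 * URB, or send a GSO skb on its own.  While the device is suspended the
 * URB is parked on dev->deferred and submitted at resume time.
 */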
2739 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2740 {
2741         int length;
2742         struct urb *urb = NULL;
2743         struct skb_data *entry;
2744         unsigned long flags;
2745         struct sk_buff_head *tqp = &dev->txq_pend;
2746         struct sk_buff *skb, *skb2;
2747         int ret;
2748         int count, pos;
2749         int skb_totallen, pkt_cnt;
2750
2751         skb_totallen = 0;
2752         pkt_cnt = 0;
2753         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2754                 if (skb_is_gso(skb)) {
2755                         if (pkt_cnt) {
2756                                 /* handle previous packets first */
2757                                 break;
2758                         }
2759                         length = skb->len;
2760                         skb2 = skb_dequeue(tqp);
2761                         goto gso_skb;
2762                 }
2763
2764                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2765                         break;
2766                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2767                 pkt_cnt++;
2768         }
2769
2770         /* copy to a single skb */
2771         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2772         if (!skb)
2773                 goto drop;
2774
2775         skb_put(skb, skb_totallen);
2776
2777         for (count = pos = 0; count < pkt_cnt; count++) {
2778                 skb2 = skb_dequeue(tqp);
2779                 if (skb2) {
2780                         memcpy(skb->data + pos, skb2->data, skb2->len);
2781                         pos += roundup(skb2->len, sizeof(u32));
2782                         dev_kfree_skb(skb2);
2783                 }
2784         }
2785
2786         length = skb_totallen;
2787
2788 gso_skb:
2789         urb = usb_alloc_urb(0, GFP_ATOMIC);
2790         if (!urb) {
2791                 netif_dbg(dev, tx_err, dev->net, "no urb\n");
2792                 goto drop;
2793         }
2794
2795         entry = (struct skb_data *)skb->cb;
2796         entry->urb = urb;
2797         entry->dev = dev;
2798         entry->length = length;
2799
2800         spin_lock_irqsave(&dev->txq.lock, flags);
2801         ret = usb_autopm_get_interface_async(dev->intf);
2802         if (ret < 0) {
2803                 spin_unlock_irqrestore(&dev->txq.lock, flags);
2804                 goto drop;
2805         }
2806
2807         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2808                           skb->data, skb->len, tx_complete, skb);
2809
2810         if (length % dev->maxpacket == 0) {
2811                 /* ask for a trailing zero-length packet (URB_ZERO_PACKET) */
2812                 urb->transfer_flags |= URB_ZERO_PACKET;
2813         }
2814
2815 #ifdef CONFIG_PM
2816         /* if this triggers, the device is still asleep */
2817         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2818                 /* transmission will be done in resume */
2819                 usb_anchor_urb(urb, &dev->deferred);
2820                 /* no use to process more packets */
2821                 netif_stop_queue(dev->net);
2822                 usb_put_urb(urb);
2823                 spin_unlock_irqrestore(&dev->txq.lock, flags);
2824                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2825                 return;
2826         }
2827 #endif
2828
2829         ret = usb_submit_urb(urb, GFP_ATOMIC);
2830         switch (ret) {
2831         case 0:
2832                 dev->net->trans_start = jiffies;
2833                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
2834                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2835                         netif_stop_queue(dev->net);
2836                 break;
2837         case -EPIPE:
2838                 netif_stop_queue(dev->net);
2839                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2840                 usb_autopm_put_interface_async(dev->intf);
2841                 break;
2842         default:
2843                 usb_autopm_put_interface_async(dev->intf);
2844                 netif_dbg(dev, tx_err, dev->net,
2845                           "tx: submit urb err %d\n", ret);
2846                 break;
2847         }
2848
2849         spin_unlock_irqrestore(&dev->txq.lock, flags);
2850
2851         if (ret) {
2852                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2853 drop:
2854                 dev->net->stats.tx_dropped++;
2855                 if (skb)
2856                         dev_kfree_skb_any(skb);
2857                 usb_free_urb(urb);
2858         } else
2859                 netif_dbg(dev, tx_queued, dev->net,
2860                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
2861 }
2862
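/* Bottom-half RX path: keep up to rx_qlen receive URBs in flight and
 * wake the transmit queue once the TX queue has drained below tx_qlen.
 */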
2863 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2864 {
2865         struct urb *urb;
2866         int i;
2867
2868         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2869                 for (i = 0; i < 10; i++) {
2870                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2871                                 break;
2872                         urb = usb_alloc_urb(0, GFP_ATOMIC);
2873                         if (urb)
2874                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2875                                         return;
2876                 }
2877
2878                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2879                         tasklet_schedule(&dev->bh);
2880         }
2881         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2882                 netif_wake_queue(dev->net);
2883 }
2884
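/* Tasklet servicing dev->done: received buffers are parsed and passed
 * up the stack, completed transmit buffers are freed, and the RX ring
 * is refilled while the interface remains up.
 */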
2885 static void lan78xx_bh(unsigned long param)
2886 {
2887         struct lan78xx_net *dev = (struct lan78xx_net *)param;
2888         struct sk_buff *skb;
2889         struct skb_data *entry;
2890
2891         while ((skb = skb_dequeue(&dev->done))) {
2892                 entry = (struct skb_data *)(skb->cb);
2893                 switch (entry->state) {
2894                 case rx_done:
2895                         entry->state = rx_cleanup;
2896                         rx_process(dev, skb);
2897                         continue;
2898                 case tx_done:
2899                         usb_free_urb(entry->urb);
2900                         dev_kfree_skb(skb);
2901                         continue;
2902                 case rx_cleanup:
2903                         usb_free_urb(entry->urb);
2904                         dev_kfree_skb(skb);
2905                         continue;
2906                 default:
2907                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
2908                         return;
2909                 }
2910         }
2911
2912         if (netif_device_present(dev->net) && netif_running(dev->net)) {
2913                 if (!skb_queue_empty(&dev->txq_pend))
2914                         lan78xx_tx_bh(dev);
2915
2916                 if (!timer_pending(&dev->delay) &&
2917                     !test_bit(EVENT_RX_HALT, &dev->flags))
2918                         lan78xx_rx_bh(dev);
2919         }
2920 }
2921
2922 static void lan78xx_delayedwork(struct work_struct *work)
2923 {
2924         int status;
2925         struct lan78xx_net *dev;
2926
2927         dev = container_of(work, struct lan78xx_net, wq.work);
2928
2929         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2930                 unlink_urbs(dev, &dev->txq);
2931                 status = usb_autopm_get_interface(dev->intf);
2932                 if (status < 0)
2933                         goto fail_pipe;
2934                 status = usb_clear_halt(dev->udev, dev->pipe_out);
2935                 usb_autopm_put_interface(dev->intf);
2936                 if (status < 0 &&
2937                     status != -EPIPE &&
2938                     status != -ESHUTDOWN) {
2939                         if (netif_msg_tx_err(dev))
2940 fail_pipe:
2941                                 netdev_err(dev->net,
2942                                            "can't clear tx halt, status %d\n",
2943                                            status);
2944                 } else {
2945                         clear_bit(EVENT_TX_HALT, &dev->flags);
2946                         if (status != -ESHUTDOWN)
2947                                 netif_wake_queue(dev->net);
2948                 }
2949         }
2950         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2951                 unlink_urbs(dev, &dev->rxq);
2952                 status = usb_autopm_get_interface(dev->intf);
2953                 if (status < 0)
2954                         goto fail_halt;
2955                 status = usb_clear_halt(dev->udev, dev->pipe_in);
2956                 usb_autopm_put_interface(dev->intf);
2957                 if (status < 0 &&
2958                     status != -EPIPE &&
2959                     status != -ESHUTDOWN) {
2960                         if (netif_msg_rx_err(dev))
2961 fail_halt:
2962                                 netdev_err(dev->net,
2963                                            "can't clear rx halt, status %d\n",
2964                                            status);
2965                 } else {
2966                         clear_bit(EVENT_RX_HALT, &dev->flags);
2967                         tasklet_schedule(&dev->bh);
2968                 }
2969         }
2970
2971         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2972                 int ret = 0;
2973
2974                 clear_bit(EVENT_LINK_RESET, &dev->flags);
2975                 status = usb_autopm_get_interface(dev->intf);
2976                 if (status < 0)
2977                         goto skip_reset;
2978                 if (lan78xx_link_reset(dev) < 0) {
2979                         usb_autopm_put_interface(dev->intf);
2980 skip_reset:
2981                         netdev_info(dev->net, "link reset failed (%d)\n",
2982                                     ret);
2983                 } else {
2984                         usb_autopm_put_interface(dev->intf);
2985                 }
2986         }
2987 }
2988
2989 static void intr_complete(struct urb *urb)
2990 {
2991         struct lan78xx_net *dev = urb->context;
2992         int status = urb->status;
2993
2994         switch (status) {
2995         /* success */
2996         case 0:
2997                 lan78xx_status(dev, urb);
2998                 break;
2999
3000         /* software-driven interface shutdown */
3001         case -ENOENT:                   /* urb killed */
3002         case -ESHUTDOWN:                /* hardware gone */
3003                 netif_dbg(dev, ifdown, dev->net,
3004                           "intr shutdown, code %d\n", status);
3005                 return;
3006
3007         /* NOTE:  not throttling like RX/TX, since this endpoint
3008          * already polls infrequently
3009          */
3010         default:
3011                 netdev_dbg(dev->net, "intr status %d\n", status);
3012                 break;
3013         }
3014
3015         if (!netif_running(dev->net))
3016                 return;
3017
3018         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3019         status = usb_submit_urb(urb, GFP_ATOMIC);
3020         if (status != 0)
3021                 netif_err(dev, timer, dev->net,
3022                           "intr resubmit --> %d\n", status);
3023 }
3024
3025 static void lan78xx_disconnect(struct usb_interface *intf)
3026 {
3027         struct lan78xx_net              *dev;
3028         struct usb_device               *udev;
3029         struct net_device               *net;
3030
3031         dev = usb_get_intfdata(intf);
3032         usb_set_intfdata(intf, NULL);
3033         if (!dev)
3034                 return;
3035
3036         udev = interface_to_usbdev(intf);
3037
3038         net = dev->net;
3039         unregister_netdev(net);
3040
3041         cancel_delayed_work_sync(&dev->wq);
3042
3043         usb_scuttle_anchored_urbs(&dev->deferred);
3044
3045         lan78xx_unbind(dev, intf);
3046
3047         usb_kill_urb(dev->urb_intr);
3048         usb_free_urb(dev->urb_intr);
3049
3050         free_netdev(net);
3051         usb_put_dev(udev);
3052 }
3053
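/* TX watchdog handler (watchdog_timeo = TX_TIMEOUT_JIFFIES): unlink any
 * in-flight TX URBs and kick the bottom-half tasklet so transmission can be
 * restarted.
 */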
3054 void lan78xx_tx_timeout(struct net_device *net)
3055 {
3056         struct lan78xx_net *dev = netdev_priv(net);
3057
3058         unlink_urbs(dev, &dev->txq);
3059         tasklet_schedule(&dev->bh);
3060 }
3061
3062 static const struct net_device_ops lan78xx_netdev_ops = {
3063         .ndo_open               = lan78xx_open,
3064         .ndo_stop               = lan78xx_stop,
3065         .ndo_start_xmit         = lan78xx_start_xmit,
3066         .ndo_tx_timeout         = lan78xx_tx_timeout,
3067         .ndo_change_mtu         = lan78xx_change_mtu,
3068         .ndo_set_mac_address    = lan78xx_set_mac_addr,
3069         .ndo_validate_addr      = eth_validate_addr,
3070         .ndo_do_ioctl           = lan78xx_ioctl,
3071         .ndo_set_rx_mode        = lan78xx_set_multicast,
3072         .ndo_set_features       = lan78xx_set_features,
3073         .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3074         .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3075 };
3076
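/* Probe: allocate the net_device, initialise the queues, delayed work and
 * bottom-half tasklet, bind to the USB interface, set up the bulk and
 * interrupt pipes (the interrupt URB is only used if its buffer allocation
 * succeeds), register the net_device and lengthen the autosuspend delay.
 */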
3077 static int lan78xx_probe(struct usb_interface *intf,
3078                          const struct usb_device_id *id)
3079 {
3080         struct lan78xx_net *dev;
3081         struct net_device *netdev;
3082         struct usb_device *udev;
3083         int ret;
3084         unsigned maxp;
3085         unsigned period;
3086         u8 *buf = NULL;
3087
3088         udev = interface_to_usbdev(intf);
3089         udev = usb_get_dev(udev);
3090
3091         ret = -ENOMEM;
3092         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3093         if (!netdev) {
3094                 dev_err(&intf->dev, "Error: OOM\n");
3095                 goto out1;
3096         }
3097
3098         /* netdev_printk() needs this */
3099         SET_NETDEV_DEV(netdev, &intf->dev);
3100
3101         dev = netdev_priv(netdev);
3102         dev->udev = udev;
3103         dev->intf = intf;
3104         dev->net = netdev;
3105         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3106                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3107
3108         skb_queue_head_init(&dev->rxq);
3109         skb_queue_head_init(&dev->txq);
3110         skb_queue_head_init(&dev->done);
3111         skb_queue_head_init(&dev->rxq_pause);
3112         skb_queue_head_init(&dev->txq_pend);
3113         mutex_init(&dev->phy_mutex);
3114
3115         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3116         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3117         init_usb_anchor(&dev->deferred);
3118
3119         netdev->netdev_ops = &lan78xx_netdev_ops;
3120         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3121         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3122
3123         ret = lan78xx_bind(dev, intf);
3124         if (ret < 0)
3125                 goto out2;
3126         strcpy(netdev->name, "eth%d");
3127
3128         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3129                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3130
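        /* Note: the endpoint layout (bulk-in, bulk-out, interrupt) is taken
         * on trust from the current altsetting; a device that exposes fewer
         * than three endpoints is not rejected here.
         */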
3131         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3132         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3133         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3134
3135         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3136         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3137
3138         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3139                                         dev->ep_intr->desc.bEndpointAddress &
3140                                         USB_ENDPOINT_NUMBER_MASK);
3141         period = dev->ep_intr->desc.bInterval;
3142
3143         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3144         buf = kmalloc(maxp, GFP_KERNEL);
3145         if (buf) {
3146                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3147                 if (!dev->urb_intr) {
3148                         kfree(buf);
3149                         goto out3;
3150                 } else {
3151                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3152                                          dev->pipe_intr, buf, maxp,
3153                                          intr_complete, dev, period);
3154                 }
3155         }
3156
3157         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3158
3159         /* driver requires remote-wakeup capability during autosuspend. */
3160         intf->needs_remote_wakeup = 1;
3161
3162         ret = register_netdev(netdev);
3163         if (ret != 0) {
3164                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3165                 goto out3;
3166         }
3167
3168         usb_set_intfdata(intf, dev);
3169
3170         ret = device_set_wakeup_enable(&udev->dev, true);
3171
3172         /* The USB core's default 2 second autosuspend delay costs more in
3173          * suspend/resume overhead than it saves; use 10 seconds instead.
3174          */
3175         pm_runtime_set_autosuspend_delay(&udev->dev,
3176                                          DEFAULT_AUTOSUSPEND_DELAY);
3177
3178         return 0;
3179
3180 out3:
3181         lan78xx_unbind(dev, intf);
3182 out2:
3183         free_netdev(netdev);
3184 out1:
3185         usb_put_dev(udev);
3186
3187         return ret;
3188 }
3189
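/* Bit-serial CRC-16 helper (polynomial 0x8005, data bits consumed LSB first,
 * seed 0xFFFF) over the leading bytes of a wakeup-frame pattern.  The result
 * is written into the CRC16 field of a WUF_CFG wakeup-filter slot; see the
 * WAKE_MCAST and WAKE_ARP cases in lan78xx_set_suspend() below for usage.
 */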
3190 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3191 {
3192         const u16 crc16poly = 0x8005;
3193         int i;
3194         u16 bit, crc, msb;
3195         u8 data;
3196
3197         crc = 0xFFFF;
3198         for (i = 0; i < len; i++) {
3199                 data = *buf++;
3200                 for (bit = 0; bit < 8; bit++) {
3201                         msb = crc >> 15;
3202                         crc <<= 1;
3203
3204                         if (msb ^ (u16)(data & 1)) {
3205                                 crc ^= crc16poly;
3206                                 crc |= (u16)0x0001U;
3207                         }
3208                         data >>= 1;
3209                 }
3210         }
3211
3212         return crc;
3213 }
3214
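/* Program the wakeup logic for suspend: stop the transmitter and receiver,
 * clear stale wake status, then translate the WAKE_* flags into WUCSR bits,
 * wakeup-frame filters (WUF_CFG/WUF_MASK) and a PMT_CTL suspend mode before
 * re-enabling the receiver so wake packets can be seen.
 */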
3215 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3216 {
3217         u32 buf;
3218         int ret;
3219         int mask_index;
3220         u16 crc;
3221         u32 temp_wucsr;
3222         u32 temp_pmt_ctl;
3223         const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3224         const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3225         const u8 arp_type[2] = { 0x08, 0x06 };
3226
3227         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3228         buf &= ~MAC_TX_TXEN_;
3229         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3230         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3231         buf &= ~MAC_RX_RXEN_;
3232         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3233
3234         ret = lan78xx_write_reg(dev, WUCSR, 0);
3235         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3236         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3237
3238         temp_wucsr = 0;
3239
3240         temp_pmt_ctl = 0;
3241         ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3242         temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3243         temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3244
3245         for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3246                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3247
3248         mask_index = 0;
3249         if (wol & WAKE_PHY) {
3250                 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3251
3252                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3253                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3254                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3255         }
3256         if (wol & WAKE_MAGIC) {
3257                 temp_wucsr |= WUCSR_MPEN_;
3258
3259                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3260                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3261                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3262         }
3263         if (wol & WAKE_BCAST) {
3264                 temp_wucsr |= WUCSR_BCST_EN_;
3265
3266                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3267                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3268                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3269         }
3270         if (wol & WAKE_MCAST) {
3271                 temp_wucsr |= WUCSR_WAKE_EN_;
3272
3273                 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3274                 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3275                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3276                                         WUF_CFGX_EN_ |
3277                                         WUF_CFGX_TYPE_MCAST_ |
3278                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3279                                         (crc & WUF_CFGX_CRC16_MASK_));
3280
3281                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3282                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3283                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3284                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3285                 mask_index++;
3286
3287                 /* for IPv6 Multicast */
3288                 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3289                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3290                                         WUF_CFGX_EN_ |
3291                                         WUF_CFGX_TYPE_MCAST_ |
3292                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3293                                         (crc & WUF_CFGX_CRC16_MASK_));
3294
3295                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3296                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3297                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3298                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3299                 mask_index++;
3300
3301                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3302                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3303                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3304         }
3305         if (wol & WAKE_UCAST) {
3306                 temp_wucsr |= WUCSR_PFDA_EN_;
3307
3308                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3309                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3310                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3311         }
3312         if (wol & WAKE_ARP) {
3313                 temp_wucsr |= WUCSR_WAKE_EN_;
3314
3315                 /* set WUF_CFG & WUF_MASK to match the EtherType field
3316                  * (offset 12,13) = ARP (0x0806)
3317                  */
3318                 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3319                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3320                                         WUF_CFGX_EN_ |
3321                                         WUF_CFGX_TYPE_ALL_ |
3322                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3323                                         (crc & WUF_CFGX_CRC16_MASK_));
3324
3325                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3326                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3327                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3328                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3329                 mask_index++;
3330
3331                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3332                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3333                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3334         }
3335
3336         ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3337
3338         /* more than one WOL source enabled: use suspend mode 0 */
3339         if (hweight_long((unsigned long)wol) > 1) {
3340                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3341                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3342                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3343         }
3344         ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3345
3346         /* clear WUPS */
3347         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3348         buf |= PMT_CTL_WUPS_MASK_;
3349         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3350
3351         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3352         buf |= MAC_RX_RXEN_;
3353         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3354
3355         return 0;
3356 }
3357
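/* USB suspend callback.  On the first suspend of the device the transmitter
 * and receiver are stopped and all URBs are torn down; autosuspend is refused
 * (-EBUSY) while TX traffic is still queued.  Runtime suspend then arms
 * wake-on-good-frame, while system suspend programs the user's wake-on-LAN
 * configuration via lan78xx_set_suspend().
 */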
3358 int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3359 {
3360         struct lan78xx_net *dev = usb_get_intfdata(intf);
3361         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3362         u32 buf;
3363         int ret;
3364         int event;
3365
3366         event = message.event;
3367
3368         if (!dev->suspend_count++) {
3369                 spin_lock_irq(&dev->txq.lock);
3370                 /* don't autosuspend while transmitting */
3371                 if ((skb_queue_len(&dev->txq) ||
3372                      skb_queue_len(&dev->txq_pend)) &&
3373                         PMSG_IS_AUTO(message)) {
3374                         spin_unlock_irq(&dev->txq.lock);
3375                         ret = -EBUSY;
3376                         goto out;
3377                 } else {
3378                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3379                         spin_unlock_irq(&dev->txq.lock);
3380                 }
3381
3382                 /* stop TX & RX */
3383                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3384                 buf &= ~MAC_TX_TXEN_;
3385                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3386                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3387                 buf &= ~MAC_RX_RXEN_;
3388                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3389
3390                 /* empty out the Rx and Tx queues */
3391                 netif_device_detach(dev->net);
3392                 lan78xx_terminate_urbs(dev);
3393                 usb_kill_urb(dev->urb_intr);
3394
3395                 /* reattach */
3396                 netif_device_attach(dev->net);
3397         }
3398
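        /* runtime (selective) suspend wakes on link changes and on any frame
         * passing the receive filter; system suspend instead honours the
         * user's wake-on-LAN settings
         */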
3399         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3400                 if (PMSG_IS_AUTO(message)) {
3401                         /* auto suspend (selective suspend) */
3402                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3403                         buf &= ~MAC_TX_TXEN_;
3404                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3405                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3406                         buf &= ~MAC_RX_RXEN_;
3407                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3408
3409                         ret = lan78xx_write_reg(dev, WUCSR, 0);
3410                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3411                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3412
3413                         /* set goodframe wakeup */
3414                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
3415
3416                         buf |= WUCSR_RFE_WAKE_EN_;
3417                         buf |= WUCSR_STORE_WAKE_;
3418
3419                         ret = lan78xx_write_reg(dev, WUCSR, buf);
3420
3421                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3422
3423                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3424                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
3425
3426                         buf |= PMT_CTL_PHY_WAKE_EN_;
3427                         buf |= PMT_CTL_WOL_EN_;
3428                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
3429                         buf |= PMT_CTL_SUS_MODE_3_;
3430
3431                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3432
3433                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3434
3435                         buf |= PMT_CTL_WUPS_MASK_;
3436
3437                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3438
3439                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3440                         buf |= MAC_RX_RXEN_;
3441                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3442                 } else {
3443                         lan78xx_set_suspend(dev, pdata->wol);
3444                 }
3445         }
3446
3447         ret = 0;
3448 out:
3449         return ret;
3450 }
3451
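/* USB resume callback.  When the last suspend reference is dropped the
 * interrupt URB is restarted, TX URBs deferred while asleep are resubmitted
 * and the queue is woken; the wakeup enables and latched wake status are then
 * cleared and the transmitter is re-enabled.
 */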
3452 int lan78xx_resume(struct usb_interface *intf)
3453 {
3454         struct lan78xx_net *dev = usb_get_intfdata(intf);
3455         struct sk_buff *skb;
3456         struct urb *res;
3457         int ret;
3458         u32 buf;
3459
3460         if (!--dev->suspend_count) {
3461                 /* resume interrupt URBs */
3462                 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3463                         usb_submit_urb(dev->urb_intr, GFP_NOIO);
3464
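                /* requeue TX URBs that were deferred while the device slept */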
3465                 spin_lock_irq(&dev->txq.lock);
3466                 while ((res = usb_get_from_anchor(&dev->deferred))) {
3467                         skb = (struct sk_buff *)res->context;
3468                         ret = usb_submit_urb(res, GFP_ATOMIC);
3469                         if (ret < 0) {
3470                                 dev_kfree_skb_any(skb);
3471                                 usb_free_urb(res);
3472                                 usb_autopm_put_interface_async(dev->intf);
3473                         } else {
3474                                 dev->net->trans_start = jiffies;
3475                                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3476                         }
3477                 }
3478
3479                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3480                 spin_unlock_irq(&dev->txq.lock);
3481
3482                 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3483                         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3484                                 netif_start_queue(dev->net);
3485                         tasklet_schedule(&dev->bh);
3486                 }
3487         }
3488
3489         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3490         ret = lan78xx_write_reg(dev, WUCSR, 0);
3491         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3492
3493         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3494                                              WUCSR2_ARP_RCD_ |
3495                                              WUCSR2_IPV6_TCPSYN_RCD_ |
3496                                              WUCSR2_IPV4_TCPSYN_RCD_);
3497
3498         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3499                                             WUCSR_EEE_RX_WAKE_ |
3500                                             WUCSR_PFDA_FR_ |
3501                                             WUCSR_RFE_WAKE_FR_ |
3502                                             WUCSR_WUFR_ |
3503                                             WUCSR_MPR_ |
3504                                             WUCSR_BCST_FR_);
3505
3506         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3507         buf |= MAC_TX_TXEN_;
3508         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3509
3510         return 0;
3511 }
3512
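/* Resume after a USB reset: the chip state is lost, so run a full
 * lan78xx_reset() and PHY re-initialisation before the normal resume path.
 */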
3513 int lan78xx_reset_resume(struct usb_interface *intf)
3514 {
3515         struct lan78xx_net *dev = usb_get_intfdata(intf);
3516
3517         lan78xx_reset(dev);
3518
3519         lan78xx_phy_init(dev);
3520
3521         return lan78xx_resume(intf);
3522 }
3523
3524 static const struct usb_device_id products[] = {
3525         {
3526         /* LAN7800 USB Gigabit Ethernet Device */
3527         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3528         },
3529         {
3530         /* LAN7850 USB Gigabit Ethernet Device */
3531         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3532         },
3533         {},
3534 };
3535 MODULE_DEVICE_TABLE(usb, products);
3536
3537 static struct usb_driver lan78xx_driver = {
3538         .name                   = DRIVER_NAME,
3539         .id_table               = products,
3540         .probe                  = lan78xx_probe,
3541         .disconnect             = lan78xx_disconnect,
3542         .suspend                = lan78xx_suspend,
3543         .resume                 = lan78xx_resume,
3544         .reset_resume           = lan78xx_reset_resume,
3545         .supports_autosuspend   = 1,
3546         .disable_hub_initiated_lpm = 1,
3547 };
3548
3549 module_usb_driver(lan78xx_driver);
3550
3551 MODULE_AUTHOR(DRIVER_AUTHOR);
3552 MODULE_DESCRIPTION(DRIVER_DESC);
3553 MODULE_LICENSE("GPL");