1
2 /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
3  * Copyright (C) 2004 Advanced Micro Devices
4  *
5  *
6  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10  * Copyright 1993 United States Government as represented by the
11  *      Director, National Security Agency.[ pcnet32.c ]
12  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14  *
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, see <http://www.gnu.org/licenses/>.
28
29 Module Name:
30
31         amd8111e.c
32
33 Abstract:
34
35          AMD8111 based 10/100 Ethernet Controller Driver.
36
37 Environment:
38
39         Kernel Mode
40
41 Revision History:
42         3.0.0
43            Initial Revision.
44         3.0.1
45          1. Dynamic interrupt coalescing.
46          2. Removed prev_stats.
47          3. MII support.
48          4. Dynamic IPG support
49         3.0.2  05/29/2003
50          1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
51          2. Bug fix: Fixed VLAN support failure.
52          3. Bug fix: Fixed receive interrupt coalescing bug.
53          4. Dynamic IPG support is disabled by default.
54         3.0.3 06/05/2003
55          1. Bug fix: Fixed failure to close the interface if SMP is enabled.
56         3.0.4 12/09/2003
57          1. Added set_mac_address routine for bonding driver support.
58          2. Tested the driver for bonding support
59          3. Bug fix: Fixed mismatch in actual receive buffer length and length
60             indicated to the h/w.
61          4. Modified amd8111e_rx() routine to receive all the received packets
62             in the first interrupt.
63          5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
64         3.0.5 03/22/2004
65          1. Added NAPI support
66
67 */
68
69
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/types.h>
73 #include <linux/compiler.h>
74 #include <linux/delay.h>
75 #include <linux/interrupt.h>
76 #include <linux/ioport.h>
77 #include <linux/pci.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/skbuff.h>
81 #include <linux/ethtool.h>
82 #include <linux/mii.h>
83 #include <linux/if_vlan.h>
84 #include <linux/ctype.h>
85 #include <linux/crc32.h>
86 #include <linux/dma-mapping.h>
87
88 #include <asm/io.h>
89 #include <asm/byteorder.h>
90 #include <asm/uaccess.h>
91
92 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
93 #define AMD8111E_VLAN_TAG_USED 1
94 #else
95 #define AMD8111E_VLAN_TAG_USED 0
96 #endif
97
98 #include "amd8111e.h"
99 #define MODULE_NAME     "amd8111e"
100 #define MODULE_VERS     "3.0.7"
101 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
102 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
103 MODULE_LICENSE("GPL");
104 module_param_array(speed_duplex, int, NULL, 0);
105 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
106 module_param_array(coalesce, bool, NULL, 0);
107 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
108 module_param_array(dynamic_ipg, bool, NULL, 0);
109 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
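/* Illustrative usage note (not part of the driver): the parameters above are
 * per-device arrays, so a hypothetical invocation such as
 *
 *     modprobe amd8111e speed_duplex=0 coalesce=1 dynamic_ipg=0
 *
 * would request auto negotiation with interrupt coalescing enabled and
 * dynamic IPG disabled for the first adapter.
 */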
110
111 /* This function will read the PHY registers. */
112 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
113 {
114         void __iomem *mmio = lp->mmio;
115         unsigned int reg_val;
116         unsigned int repeat= REPEAT_CNT;
117
118         reg_val = readl(mmio + PHY_ACCESS);
119         while (reg_val & PHY_CMD_ACTIVE)
120                 reg_val = readl( mmio + PHY_ACCESS );
121
122         writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
123                            ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
124         do{
125                 reg_val = readl(mmio + PHY_ACCESS);
126                 udelay(30);  /* It takes 30 us to read/write data */
127         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
128         if(reg_val & PHY_RD_ERR)
129                 goto err_phy_read;
130
131         *val = reg_val & 0xffff;
132         return 0;
133 err_phy_read:
134         *val = 0;
135         return -EINVAL;
136
137 }
138
139 /* This function will write into PHY registers. */
140 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
141 {
142         unsigned int repeat = REPEAT_CNT;
143         void __iomem *mmio = lp->mmio;
144         unsigned int reg_val;
145
146         reg_val = readl(mmio + PHY_ACCESS);
147         while (reg_val & PHY_CMD_ACTIVE)
148                 reg_val = readl( mmio + PHY_ACCESS );
149
150         writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
151                            ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
152
153         do{
154                 reg_val = readl(mmio + PHY_ACCESS);
155                 udelay(30);  /* It takes 30 us to read/write the data */
156         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
157
158         if(reg_val & PHY_RD_ERR)
159                 goto err_phy_write;
160
161         return 0;
162
163 err_phy_write:
164         return -EINVAL;
165
166 }
167
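/* Note added for clarity (inferred from the two helpers above): the
 * PHY_ACCESS command word packs the PHY address into bits 25:21, the
 * register number into bits 20:16 and, for writes, the 16-bit data into
 * bits 15:0, e.g.
 *
 *     cmd = PHY_WR_CMD | ((phy_id & 0x1f) << 21) | ((reg & 0x1f) << 16) | val;
 *
 * PHY_CMD_ACTIVE is then polled until the controller completes the access.
 */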
168 /* This is the mii register read function provided to the mii interface. */
169 static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
170 {
171         struct amd8111e_priv* lp = netdev_priv(dev);
172         unsigned int reg_val;
173
174         amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
175         return reg_val;
176
177 }
178
179 /* This is the mii register write function provided to the mii interface. */
180 static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
181 {
182         struct amd8111e_priv* lp = netdev_priv(dev);
183
184         amd8111e_write_phy(lp, phy_id, reg_num, val);
185 }
186
187 /* This function will set the PHY speed. During initialization it sets
188  * the original speed to 100 full.
189  */
190 static void amd8111e_set_ext_phy(struct net_device *dev)
191 {
192         struct amd8111e_priv *lp = netdev_priv(dev);
193         u32 bmcr,advert,tmp;
194
195         /* Determine mii register values to set the speed */
196         advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
197         tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
198         switch (lp->ext_phy_option){
199
200                 default:
201                 case SPEED_AUTONEG: /* advertise all values */
202                         tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
203                                 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
204                         break;
205                 case SPEED10_HALF:
206                         tmp |= ADVERTISE_10HALF;
207                         break;
208                 case SPEED10_FULL:
209                         tmp |= ADVERTISE_10FULL;
210                         break;
211                 case SPEED100_HALF:
212                         tmp |= ADVERTISE_100HALF;
213                         break;
214                 case SPEED100_FULL:
215                         tmp |= ADVERTISE_100FULL;
216                         break;
217         }
218
219         if(advert != tmp)
220                 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
221         /* Restart auto negotiation */
222         bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
223         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
224         amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
225
226 }
227
228 /* This function will unmap skb->data space and will free
229  * all transmit and receive skbuffs.
230  */
231 static int amd8111e_free_skbs(struct net_device *dev)
232 {
233         struct amd8111e_priv *lp = netdev_priv(dev);
234         struct sk_buff* rx_skbuff;
235         int i;
236
237         /* Freeing transmit skbs */
238         for(i = 0; i < NUM_TX_BUFFERS; i++){
239                 if(lp->tx_skbuff[i]){
240                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
241                         dev_kfree_skb (lp->tx_skbuff[i]);
242                         lp->tx_skbuff[i] = NULL;
243                         lp->tx_dma_addr[i] = 0;
244                 }
245         }
246         /* Freeing previously allocated receive buffers */
247         for (i = 0; i < NUM_RX_BUFFERS; i++){
248                 rx_skbuff = lp->rx_skbuff[i];
249                 if(rx_skbuff != NULL){
250                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
251                                   lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
252                         dev_kfree_skb(lp->rx_skbuff[i]);
253                         lp->rx_skbuff[i] = NULL;
254                         lp->rx_dma_addr[i] = 0;
255                 }
256         }
257
258         return 0;
259 }
260
261 /* This will set the receive buffer length corresponding
262  * to the mtu size of the network interface.
263  */
264 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
265 {
266         struct amd8111e_priv* lp = netdev_priv(dev);
267         unsigned int mtu = dev->mtu;
268
269         if (mtu > ETH_DATA_LEN){
270                 /* MTU + ethernet header + FCS
271                  * + optional VLAN tag + skb reserve space 2
272                  */
273                 lp->rx_buff_len = mtu + ETH_HLEN + 10;
274                 lp->options |= OPTION_JUMBO_ENABLE;
275         } else{
276                 lp->rx_buff_len = PKT_BUFF_SZ;
277                 lp->options &= ~OPTION_JUMBO_ENABLE;
278         }
279 }
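/* Worked example (added note): with ETH_HLEN = 14, a jumbo MTU of 9000 gives
 * rx_buff_len = 9000 + 14 + 10 = 9024 bytes; the extra 10 bytes cover the
 * 4-byte FCS, an optional 4-byte VLAN tag and the 2 bytes reserved at the
 * head of each receive skb.
 */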
280
281 /* This function will free all the previously allocated buffers,
282  * determine the new receive buffer length and allocate new receive buffers.
283  * This function also allocates and initializes both the transmit
284  * and receive hardware descriptors.
285  */
286 static int amd8111e_init_ring(struct net_device *dev)
287 {
288         struct amd8111e_priv *lp = netdev_priv(dev);
289         int i;
290
291         lp->rx_idx = lp->tx_idx = 0;
292         lp->tx_complete_idx = 0;
293         lp->tx_ring_idx = 0;
294
295
296         if(lp->opened)
297                 /* Free previously allocated transmit and receive skbs */
298                 amd8111e_free_skbs(dev);
299
300         else{
301                  /* allocate the tx and rx descriptors */
302                 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
303                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
304                         &lp->tx_ring_dma_addr)) == NULL)
305
306                         goto err_no_mem;
307
308                 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
309                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
310                         &lp->rx_ring_dma_addr)) == NULL)
311
312                         goto err_free_tx_ring;
313
314         }
315         /* Set new receive buff size */
316         amd8111e_set_rx_buff_len(dev);
317
318         /* Allocating receive  skbs */
319         for (i = 0; i < NUM_RX_BUFFERS; i++) {
320
321                 lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
322                 if (!lp->rx_skbuff[i]) {
323                                 /* Release previously allocated skbs */
324                                 for(--i; i >= 0 ;i--)
325                                         dev_kfree_skb(lp->rx_skbuff[i]);
326                                 goto err_free_rx_ring;
327                 }
328                 skb_reserve(lp->rx_skbuff[i],2);
329         }
330         /* Initializing receive descriptors */
331         for (i = 0; i < NUM_RX_BUFFERS; i++) {
332                 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
333                         lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
334
335                 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
336                 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
337                 wmb();
338                 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
339         }
340
341         /* Initializing transmit descriptors */
342         for (i = 0; i < NUM_TX_RING_DR; i++) {
343                 lp->tx_ring[i].buff_phy_addr = 0;
344                 lp->tx_ring[i].tx_flags = 0;
345                 lp->tx_ring[i].buff_count = 0;
346         }
347
348         return 0;
349
350 err_free_rx_ring:
351
352         pci_free_consistent(lp->pci_dev,
353                 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
354                 lp->rx_ring_dma_addr);
355
356 err_free_tx_ring:
357
358         pci_free_consistent(lp->pci_dev,
359                  sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
360                  lp->tx_ring_dma_addr);
361
362 err_no_mem:
363         return -ENOMEM;
364 }
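/* Added note: after amd8111e_init_ring() every receive descriptor carries
 * OWN_BIT, i.e. it is owned by the controller and may be filled with an
 * incoming frame, while the zeroed transmit descriptors stay owned by the
 * driver until amd8111e_start_xmit() hands them over.
 */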
365
366 /* This function will set the interrupt coalescing according
367  * to the input arguments
368  */
369 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
370 {
371         unsigned int timeout;
372         unsigned int event_count;
373
374         struct amd8111e_priv *lp = netdev_priv(dev);
375         void __iomem *mmio = lp->mmio;
376         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
377
378
379         switch(cmod)
380         {
381                 case RX_INTR_COAL :
382                         timeout = coal_conf->rx_timeout;
383                         event_count = coal_conf->rx_event_count;
384                         if( timeout > MAX_TIMEOUT ||
385                                         event_count > MAX_EVENT_COUNT )
386                                 return -EINVAL;
387
388                         timeout = timeout * DELAY_TIMER_CONV;
389                         writel(VAL0|STINTEN, mmio+INTEN0);
390                         writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
391                                                         mmio+DLY_INT_A);
392                         break;
393
394                 case TX_INTR_COAL :
395                         timeout = coal_conf->tx_timeout;
396                         event_count = coal_conf->tx_event_count;
397                         if( timeout > MAX_TIMEOUT ||
398                                         event_count > MAX_EVENT_COUNT )
399                                 return -EINVAL;
400
401
402                         timeout = timeout * DELAY_TIMER_CONV;
403                         writel(VAL0|STINTEN,mmio+INTEN0);
404                         writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
405                                                          mmio+DLY_INT_B);
406                         break;
407
408                 case DISABLE_COAL:
409                         writel(0,mmio+STVAL);
410                         writel(STINTEN, mmio+INTEN0);
411                         writel(0, mmio +DLY_INT_B);
412                         writel(0, mmio+DLY_INT_A);
413                         break;
414                  case ENABLE_COAL:
415                        /* Start the timer */
416                         writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
417                         writel(VAL0|STINTEN, mmio+INTEN0);
418                         break;
419                 default:
420                         break;
421
422    }
423         return 0;
424
425 }
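/* Added note: the delayed-interrupt registers written above carry the event
 * count shifted into bits 16 and up and the timeout (already scaled by
 * DELAY_TIMER_CONV) in the low bits, so a hypothetical call with
 * rx_timeout = 1 and rx_event_count = 4 programs DLY_INT_A roughly as
 *
 *     writel(DLY_INT_A_R0 | (4 << 16) | (1 * DELAY_TIMER_CONV), mmio + DLY_INT_A);
 */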
426
427 /* This function initializes the device registers and starts the device. */
428 static int amd8111e_restart(struct net_device *dev)
429 {
430         struct amd8111e_priv *lp = netdev_priv(dev);
431         void __iomem *mmio = lp->mmio;
432         int i,reg_val;
433
434         /* stop the chip */
435          writel(RUN, mmio + CMD0);
436
437         if(amd8111e_init_ring(dev))
438                 return -ENOMEM;
439
440         /* enable the port manager and set auto negotiation always */
441         writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
442         writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
443
444         amd8111e_set_ext_phy(dev);
445
446         /* set control registers */
447         reg_val = readl(mmio + CTRL1);
448         reg_val &= ~XMTSP_MASK;
449         writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
450
451         /* enable interrupt */
452         writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
453                 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
454                 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
455
456         writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
457
458         /* initialize tx and rx ring base addresses */
459         writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
460         writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
461
462         writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
463         writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
464
465         /* set default IPG to 96 */
466         writew((u32)DEFAULT_IPG,mmio+IPG);
467         writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
468
469         if(lp->options & OPTION_JUMBO_ENABLE){
470                 writel((u32)VAL2|JUMBO, mmio + CMD3);
471                 /* Reset REX_UFLO */
472                 writel( REX_UFLO, mmio + CMD2);
473                 /* Should not set REX_UFLO for jumbo frames */
474                 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
475         }else{
476                 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
477                 writel((u32)JUMBO, mmio + CMD3);
478         }
479
480 #if AMD8111E_VLAN_TAG_USED
481         writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
482 #endif
483         writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
484
485         /* Setting the MAC address to the device */
486         for (i = 0; i < ETH_ALEN; i++)
487                 writeb( dev->dev_addr[i], mmio + PADR + i );
488
489         /* Enable interrupt coalesce */
490         if(lp->options & OPTION_INTR_COAL_ENABLE){
491                 netdev_info(dev, "Interrupt Coalescing Enabled.\n");
492                 amd8111e_set_coalesce(dev,ENABLE_COAL);
493         }
494
495         /* set RUN bit to start the chip */
496         writel(VAL2 | RDMD0, mmio + CMD0);
497         writel(VAL0 | INTREN | RUN, mmio + CMD0);
498
499         /* To avoid PCI posting bug */
500         readl(mmio+CMD0);
501         return 0;
502 }
503
504 /* This function clears the necessary device registers. */
505 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
506 {
507         unsigned int reg_val;
508         unsigned int logic_filter[2] ={0,};
509         void __iomem *mmio = lp->mmio;
510
511
512         /* stop the chip */
513         writel(RUN, mmio + CMD0);
514
515         /* AUTOPOLL0 Register - TBD: default value is 8100 in FPS */
516         writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
517
518         /* Clear RCV_RING_BASE_ADDR */
519         writel(0, mmio + RCV_RING_BASE_ADDR0);
520
521         /* Clear XMT_RING_BASE_ADDR */
522         writel(0, mmio + XMT_RING_BASE_ADDR0);
523         writel(0, mmio + XMT_RING_BASE_ADDR1);
524         writel(0, mmio + XMT_RING_BASE_ADDR2);
525         writel(0, mmio + XMT_RING_BASE_ADDR3);
526
527         /* Clear CMD0  */
528         writel(CMD0_CLEAR,mmio + CMD0);
529
530         /* Clear CMD2 */
531         writel(CMD2_CLEAR, mmio +CMD2);
532
533         /* Clear CMD7 */
534         writel(CMD7_CLEAR , mmio + CMD7);
535
536         /* Clear DLY_INT_A and DLY_INT_B */
537         writel(0x0, mmio + DLY_INT_A);
538         writel(0x0, mmio + DLY_INT_B);
539
540         /* Clear FLOW_CONTROL */
541         writel(0x0, mmio + FLOW_CONTROL);
542
543         /* Clear INT0  write 1 to clear register */
544         reg_val = readl(mmio + INT0);
545         writel(reg_val, mmio + INT0);
546
547         /* Clear STVAL */
548         writel(0x0, mmio + STVAL);
549
550         /* Clear INTEN0 */
551         writel( INTEN0_CLEAR, mmio + INTEN0);
552
553         /* Clear LADRF */
554         writel(0x0 , mmio + LADRF);
555
556         /* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
557         writel( 0x80010,mmio + SRAM_SIZE);
558
559         /* Clear RCV_RING0_LEN */
560         writel(0x0, mmio +  RCV_RING_LEN0);
561
562         /* Clear XMT_RING0/1/2/3_LEN */
563         writel(0x0, mmio +  XMT_RING_LEN0);
564         writel(0x0, mmio +  XMT_RING_LEN1);
565         writel(0x0, mmio +  XMT_RING_LEN2);
566         writel(0x0, mmio +  XMT_RING_LEN3);
567
568         /* Clear XMT_RING_LIMIT */
569         writel(0x0, mmio + XMT_RING_LIMIT);
570
571         /* Clear MIB */
572         writew(MIB_CLEAR, mmio + MIB_ADDR);
573
574         /* Clear LADRF */
575         amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
576
577         /* SRAM_SIZE register */
578         reg_val = readl(mmio + SRAM_SIZE);
579
580         if(lp->options & OPTION_JUMBO_ENABLE)
581                 writel( VAL2|JUMBO, mmio + CMD3);
582 #if AMD8111E_VLAN_TAG_USED
583         writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
584 #endif
585         /* Set default value to CTRL1 Register */
586         writel(CTRL1_DEFAULT, mmio + CTRL1);
587
588         /* To avoid PCI posting bug */
589         readl(mmio + CMD2);
590
591 }
592
593 /* This function disables the interrupt and clears all the pending
594  * interrupts in INT0
595  */
596 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
597 {
598         u32 intr0;
599
600         /* Disable interrupt */
601         writel(INTREN, lp->mmio + CMD0);
602
603         /* Clear INT0 */
604         intr0 = readl(lp->mmio + INT0);
605         writel(intr0, lp->mmio + INT0);
606
607         /* To avoid PCI posting bug */
608         readl(lp->mmio + INT0);
609
610 }
611
612 /* This function stops the chip. */
613 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
614 {
615         writel(RUN, lp->mmio + CMD0);
616
617         /* To avoid PCI posting bug */
618         readl(lp->mmio + CMD0);
619 }
620
621 /* This function frees the transmit and receive descriptor rings. */
622 static void amd8111e_free_ring(struct amd8111e_priv* lp)
623 {
624         /* Free transmit and receive descriptor rings */
625         if(lp->rx_ring){
626                 pci_free_consistent(lp->pci_dev,
627                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
628                         lp->rx_ring, lp->rx_ring_dma_addr);
629                 lp->rx_ring = NULL;
630         }
631
632         if(lp->tx_ring){
633                 pci_free_consistent(lp->pci_dev,
634                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
635                         lp->tx_ring, lp->tx_ring_dma_addr);
636
637                 lp->tx_ring = NULL;
638         }
639
640 }
641
642 /* This function will free all the transmit skbs that have actually been
643  * transmitted by the device. It checks the ownership of each
644  * skb before freeing it.
645  */
646 static int amd8111e_tx(struct net_device *dev)
647 {
648         struct amd8111e_priv* lp = netdev_priv(dev);
649         int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
650         int status;
651         /* Complete all the transmit packets */
652         while (lp->tx_complete_idx != lp->tx_idx){
653                 tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
654                 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
655
656                 if(status & OWN_BIT)
657                         break;  /* It still hasn't been Txed */
658
659                 lp->tx_ring[tx_index].buff_phy_addr = 0;
660
661                 /* We must free the original skb */
662                 if (lp->tx_skbuff[tx_index]) {
663                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
664                                         lp->tx_skbuff[tx_index]->len,
665                                         PCI_DMA_TODEVICE);
666                         dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
667                         lp->tx_skbuff[tx_index] = NULL;
668                         lp->tx_dma_addr[tx_index] = 0;
669                 }
670                 lp->tx_complete_idx++;
671                 /*COAL update tx coalescing parameters */
672                 lp->coal_conf.tx_packets++;
673                 lp->coal_conf.tx_bytes +=
674                         le16_to_cpu(lp->tx_ring[tx_index].buff_count);
675
676                 if (netif_queue_stopped(dev) &&
677                         lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
678                         /* The ring is no longer full, clear tbusy. */
679                         /* lp->tx_full = 0; */
680                         netif_wake_queue (dev);
681                 }
682         }
683         return 0;
684 }
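/* Added note: tx_idx and tx_complete_idx are free-running counters and the
 * descriptor index is obtained by masking with TX_RING_DR_MOD_MASK; the
 * queue is woken again once the number of outstanding descriptors falls
 * below NUM_TX_BUFFERS - 2.
 */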
685
686 /* This function handles the driver receive operation in polling mode */
687 static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
688 {
689         struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
690         struct net_device *dev = lp->amd8111e_net_dev;
691         int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
692         void __iomem *mmio = lp->mmio;
693         struct sk_buff *skb,*new_skb;
694         int min_pkt_len, status;
695         unsigned int intr0;
696         int num_rx_pkt = 0;
697         short pkt_len;
698 #if AMD8111E_VLAN_TAG_USED
699         short vtag;
700 #endif
701         int rx_pkt_limit = budget;
702         unsigned long flags;
703
704         if (rx_pkt_limit <= 0)
705                 goto rx_not_empty;
706
707         do{
708                 /* process receive packets until we use the quota.
709                  * If we own the next entry, it's a new packet. Send it up.
710                  */
711                 while(1) {
712                         status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
713                         if (status & OWN_BIT)
714                                 break;
715
716                         /* There is a tricky error noted by John Murphy,
717                          * <murf@perftech.com> to Russ Nelson: Even with
718                          * full-sized buffers it's possible for a
719                          * jabber packet to use two buffers, with only
720                          * the last correctly noting the error.
721                          */
722                         if(status & ERR_BIT) {
723                                 /* resetting flags */
724                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
725                                 goto err_next_pkt;
726                         }
727                         /* check for STP and ENP */
728                         if(!((status & STP_BIT) && (status & ENP_BIT))){
729                                 /* resetting flags */
730                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
731                                 goto err_next_pkt;
732                         }
733                         pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
734
735 #if AMD8111E_VLAN_TAG_USED
736                         vtag = status & TT_MASK;
737                         /* MAC will strip the VLAN tag */
738                         if (vtag != 0)
739                                 min_pkt_len =MIN_PKT_LEN - 4;
740                         else
741 #endif
742                                 min_pkt_len =MIN_PKT_LEN;
743
744                         if (pkt_len < min_pkt_len) {
745                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
746                                 lp->drv_rx_errors++;
747                                 goto err_next_pkt;
748                         }
749                         if(--rx_pkt_limit < 0)
750                                 goto rx_not_empty;
751                         new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
752                         if (!new_skb) {
753                                 /* if allocation fails,
754                                  * ignore that packet and go to the next one
755                                  */
756                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
757                                 lp->drv_rx_errors++;
758                                 goto err_next_pkt;
759                         }
760
761                         skb_reserve(new_skb, 2);
762                         skb = lp->rx_skbuff[rx_index];
763                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
764                                          lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
765                         skb_put(skb, pkt_len);
766                         lp->rx_skbuff[rx_index] = new_skb;
767                         lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
768                                                                    new_skb->data,
769                                                                    lp->rx_buff_len-2,
770                                                                    PCI_DMA_FROMDEVICE);
771
772                         skb->protocol = eth_type_trans(skb, dev);
773
774 #if AMD8111E_VLAN_TAG_USED
775                         if (vtag == TT_VLAN_TAGGED){
776                                 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
777                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
778                         }
779 #endif
780                         netif_receive_skb(skb);
781                         /*COAL update rx coalescing parameters*/
782                         lp->coal_conf.rx_packets++;
783                         lp->coal_conf.rx_bytes += pkt_len;
784                         num_rx_pkt++;
785
786                 err_next_pkt:
787                         lp->rx_ring[rx_index].buff_phy_addr
788                                 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
789                         lp->rx_ring[rx_index].buff_count =
790                                 cpu_to_le16(lp->rx_buff_len-2);
791                         wmb();
792                         lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
793                         rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
794                 }
795                 /* Check the interrupt status register for more packets in the
796                  * mean time. Process them since we have not used up our quota.
797                  */
798                 intr0 = readl(mmio + INT0);
799                 /*Ack receive packets */
800                 writel(intr0 & RINT0,mmio + INT0);
801
802         } while(intr0 & RINT0);
803
804         if (rx_pkt_limit > 0) {
805                 /* Receive descriptor is empty now */
806                 spin_lock_irqsave(&lp->lock, flags);
807                 __napi_complete(napi);
808                 writel(VAL0|RINTEN0, mmio + INTEN0);
809                 writel(VAL2 | RDMD0, mmio + CMD0);
810                 spin_unlock_irqrestore(&lp->lock, flags);
811         }
812
813 rx_not_empty:
814         return num_rx_pkt;
815 }
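/* Added note: the poll routine above follows the usual NAPI pattern - packets
 * are pulled from the ring until either the budget is spent or the ring is
 * empty; only in the latter case is __napi_complete() called and the receive
 * interrupt (RINTEN0) re-enabled.
 */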
816
817 /* This function will indicate the link status to the kernel. */
818 static int amd8111e_link_change(struct net_device* dev)
819 {
820         struct amd8111e_priv *lp = netdev_priv(dev);
821         int status0,speed;
822
823         /* read the link change */
824         status0 = readl(lp->mmio + STAT0);
825
826         if(status0 & LINK_STATS){
827                 if(status0 & AUTONEG_COMPLETE)
828                         lp->link_config.autoneg = AUTONEG_ENABLE;
829                 else
830                         lp->link_config.autoneg = AUTONEG_DISABLE;
831
832                 if(status0 & FULL_DPLX)
833                         lp->link_config.duplex = DUPLEX_FULL;
834                 else
835                         lp->link_config.duplex = DUPLEX_HALF;
836                 speed = (status0 & SPEED_MASK) >> 7;
837                 if(speed == PHY_SPEED_10)
838                         lp->link_config.speed = SPEED_10;
839                 else if(speed == PHY_SPEED_100)
840                         lp->link_config.speed = SPEED_100;
841
842                 netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
843                             (lp->link_config.speed == SPEED_100) ?
844                                                         "100" : "10",
845                             (lp->link_config.duplex == DUPLEX_FULL) ?
846                                                         "Full" : "Half");
847
848                 netif_carrier_on(dev);
849         }
850         else{
851                 lp->link_config.speed = SPEED_INVALID;
852                 lp->link_config.duplex = DUPLEX_INVALID;
853                 lp->link_config.autoneg = AUTONEG_INVALID;
854                 netdev_info(dev, "Link is Down.\n");
855                 netif_carrier_off(dev);
856         }
857
858         return 0;
859 }
860
861 /* This function reads the mib counters. */
862 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
863 {
864         unsigned int  status;
865         unsigned  int data;
866         unsigned int repeat = REPEAT_CNT;
867
868         writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
869         do {
870                 status = readw(mmio + MIB_ADDR);
871                 udelay(2);      /* controller takes MAX 2 us to get mib data */
872         }
873         while (--repeat && (status & MIB_CMD_ACTIVE));
874
875         data = readl(mmio + MIB_DATA);
876         return data;
877 }
878
879 /* This function reads the mib registers and returns the hardware statistics.
880  * It updates previous internal driver statistics with new values.
881  */
882 static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
883 {
884         struct amd8111e_priv *lp = netdev_priv(dev);
885         void __iomem *mmio = lp->mmio;
886         unsigned long flags;
887         struct net_device_stats *new_stats = &dev->stats;
888
889         if (!lp->opened)
890                 return new_stats;
891         spin_lock_irqsave (&lp->lock, flags);
892
893         /* stats.rx_packets */
894         new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
895                                 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
896                                 amd8111e_read_mib(mmio, rcv_unicast_pkts);
897
898         /* stats.tx_packets */
899         new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
900
901         /*stats.rx_bytes */
902         new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
903
904         /* stats.tx_bytes */
905         new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
906
907         /* stats.rx_errors */
908         /* hw errors + errors driver reported */
909         new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
910                                 amd8111e_read_mib(mmio, rcv_fragments)+
911                                 amd8111e_read_mib(mmio, rcv_jabbers)+
912                                 amd8111e_read_mib(mmio, rcv_alignment_errors)+
913                                 amd8111e_read_mib(mmio, rcv_fcs_errors)+
914                                 amd8111e_read_mib(mmio, rcv_miss_pkts)+
915                                 lp->drv_rx_errors;
916
917         /* stats.tx_errors */
918         new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
919
920         /* stats.rx_dropped*/
921         new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
922
923         /* stats.tx_dropped*/
924         new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
925
926         /* stats.multicast*/
927         new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
928
929         /* stats.collisions*/
930         new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
931
932         /* stats.rx_length_errors*/
933         new_stats->rx_length_errors =
934                 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
935                 amd8111e_read_mib(mmio, rcv_oversize_pkts);
936
937         /* stats.rx_over_errors*/
938         new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
939
940         /* stats.rx_crc_errors*/
941         new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
942
943         /* stats.rx_frame_errors*/
944         new_stats->rx_frame_errors =
945                 amd8111e_read_mib(mmio, rcv_alignment_errors);
946
947         /* stats.rx_fifo_errors */
948         new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
949
950         /* stats.rx_missed_errors */
951         new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
952
953         /* stats.tx_aborted_errors*/
954         new_stats->tx_aborted_errors =
955                 amd8111e_read_mib(mmio, xmt_excessive_collision);
956
957         /* stats.tx_carrier_errors*/
958         new_stats->tx_carrier_errors =
959                 amd8111e_read_mib(mmio, xmt_loss_carrier);
960
961         /* stats.tx_fifo_errors*/
962         new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
963
964         /* stats.tx_window_errors*/
965         new_stats->tx_window_errors =
966                 amd8111e_read_mib(mmio, xmt_late_collision);
967
968         /* Reset the mibs for collecting new statistics */
969         /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
970
971         spin_unlock_irqrestore (&lp->lock, flags);
972
973         return new_stats;
974 }
975
976 /* This function recalculates the interrupt coalescing mode on every interrupt
977  * according to the data rate and the packet rate.
978  */
979 static int amd8111e_calc_coalesce(struct net_device *dev)
980 {
981         struct amd8111e_priv *lp = netdev_priv(dev);
982         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
983         int tx_pkt_rate;
984         int rx_pkt_rate;
985         int tx_data_rate;
986         int rx_data_rate;
987         int rx_pkt_size;
988         int tx_pkt_size;
989
990         tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
991         coal_conf->tx_prev_packets =  coal_conf->tx_packets;
992
993         tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
994         coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
995
996         rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
997         coal_conf->rx_prev_packets =  coal_conf->rx_packets;
998
999         rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1000         coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
1001
1002         if(rx_pkt_rate < 800){
1003                 if(coal_conf->rx_coal_type != NO_COALESCE){
1004
1005                         coal_conf->rx_timeout = 0x0;
1006                         coal_conf->rx_event_count = 0;
1007                         amd8111e_set_coalesce(dev,RX_INTR_COAL);
1008                         coal_conf->rx_coal_type = NO_COALESCE;
1009                 }
1010         }
1011         else{
1012
1013                 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1014                 if (rx_pkt_size < 128){
1015                         if(coal_conf->rx_coal_type != NO_COALESCE){
1016
1017                                 coal_conf->rx_timeout = 0;
1018                                 coal_conf->rx_event_count = 0;
1019                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1020                                 coal_conf->rx_coal_type = NO_COALESCE;
1021                         }
1022
1023                 }
1024                 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1025
1026                         if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1027                                 coal_conf->rx_timeout = 1;
1028                                 coal_conf->rx_event_count = 4;
1029                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1030                                 coal_conf->rx_coal_type = LOW_COALESCE;
1031                         }
1032                 }
1033                 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1034
1035                         if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1036                                 coal_conf->rx_timeout = 1;
1037                                 coal_conf->rx_event_count = 4;
1038                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1039                                 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1040                         }
1041
1042                 }
1043                 else if(rx_pkt_size >= 1024){
1044                         if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1045                                 coal_conf->rx_timeout = 2;
1046                                 coal_conf->rx_event_count = 3;
1047                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1048                                 coal_conf->rx_coal_type = HIGH_COALESCE;
1049                         }
1050                 }
1051         }
1052         /* NOW FOR TX INTR COALESC */
1053         if(tx_pkt_rate < 800){
1054                 if(coal_conf->tx_coal_type != NO_COALESCE){
1055
1056                         coal_conf->tx_timeout = 0x0;
1057                         coal_conf->tx_event_count = 0;
1058                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1059                         coal_conf->tx_coal_type = NO_COALESCE;
1060                 }
1061         }
1062         else{
1063
1064                 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1065                 if (tx_pkt_size < 128){
1066
1067                         if(coal_conf->tx_coal_type != NO_COALESCE){
1068
1069                                 coal_conf->tx_timeout = 0;
1070                                 coal_conf->tx_event_count = 0;
1071                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1072                                 coal_conf->tx_coal_type = NO_COALESCE;
1073                         }
1074
1075                 }
1076                 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1077
1078                         if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1079                                 coal_conf->tx_timeout = 1;
1080                                 coal_conf->tx_event_count = 2;
1081                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1082                                 coal_conf->tx_coal_type = LOW_COALESCE;
1083
1084                         }
1085                 }
1086                 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1087
1088                         if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1089                                 coal_conf->tx_timeout = 2;
1090                                 coal_conf->tx_event_count = 5;
1091                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1092                                 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1093                         }
1094
1095                 }
1096                 else if(tx_pkt_size >= 1024){
1097                         if (tx_pkt_size >= 1024){
1098                                 if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1099                                         coal_conf->tx_timeout = 4;
1100                                         coal_conf->tx_event_count = 8;
1101                                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1102                                         coal_conf->tx_coal_type = HIGH_COALESCE;
1103                                 }
1104                         }
1105                 }
1106         }
1107         return 0;
1108
1109 }
1110
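/* Added summary of the thresholds used above: when fewer than 800 packets
 * were seen in the last timer interval, coalescing is switched off; otherwise
 * the average packet size selects the mode, e.g. on the receive side:
 *
 *     < 128 bytes      -> NO_COALESCE
 *     128 - 511 bytes  -> LOW_COALESCE    (timeout 1, event count 4)
 *     512 - 1023 bytes -> MEDIUM_COALESCE (timeout 1, event count 4)
 *     >= 1024 bytes    -> HIGH_COALESCE   (timeout 2, event count 3)
 */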
1111 /* This is the device interrupt function. It handles transmit,
1112  * receive, link change and hardware timer interrupts.
1113  */
1114 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1115 {
1116
1117         struct net_device * dev = (struct net_device *) dev_id;
1118         struct amd8111e_priv *lp = netdev_priv(dev);
1119         void __iomem *mmio = lp->mmio;
1120         unsigned int intr0, intren0;
1121         unsigned int handled = 1;
1122
1123         if(unlikely(dev == NULL))
1124                 return IRQ_NONE;
1125
1126         spin_lock(&lp->lock);
1127
1128         /* disabling interrupt */
1129         writel(INTREN, mmio + CMD0);
1130
1131         /* Read interrupt status */
1132         intr0 = readl(mmio + INT0);
1133         intren0 = readl(mmio + INTEN0);
1134
1135         /* Process all the INT events until the INTR bit is clear. */
1136
1137         if (!(intr0 & INTR)){
1138                 handled = 0;
1139                 goto err_no_interrupt;
1140         }
1141
1142         /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1143         writel(intr0, mmio + INT0);
1144
1145         /* Check if Receive Interrupt has occurred. */
1146         if (intr0 & RINT0) {
1147                 if (napi_schedule_prep(&lp->napi)) {
1148                         /* Disable receive interrupts */
1149                         writel(RINTEN0, mmio + INTEN0);
1150                         /* Schedule a polling routine */
1151                         __napi_schedule(&lp->napi);
1152                 } else if (intren0 & RINTEN0) {
1153                         netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
1154                         /* Fix by disabling receive interrupts */
1155                         writel(RINTEN0, mmio + INTEN0);
1156                 }
1157         }
1158
1159         /* Check if  Transmit Interrupt has occurred. */
1160         if (intr0 & TINT0)
1161                 amd8111e_tx(dev);
1162
1163         /* Check if  Link Change Interrupt has occurred. */
1164         if (intr0 & LCINT)
1165                 amd8111e_link_change(dev);
1166
1167         /* Check if Hardware Timer Interrupt has occurred. */
1168         if (intr0 & STINT)
1169                 amd8111e_calc_coalesce(dev);
1170
1171 err_no_interrupt:
1172         writel( VAL0 | INTREN,mmio + CMD0);
1173
1174         spin_unlock(&lp->lock);
1175
1176         return IRQ_RETVAL(handled);
1177 }
1178
1179 #ifdef CONFIG_NET_POLL_CONTROLLER
1180 static void amd8111e_poll(struct net_device *dev)
1181 {
1182         unsigned long flags;
1183         local_irq_save(flags);
1184         amd8111e_interrupt(0, dev);
1185         local_irq_restore(flags);
1186 }
1187 #endif
1188
1189
1190 /* This function closes the network interface and updates
1191  * the statistics so that the most recent statistics will be
1192  * available after the interface is down.
1193  */
1194 static int amd8111e_close(struct net_device * dev)
1195 {
1196         struct amd8111e_priv *lp = netdev_priv(dev);
1197         netif_stop_queue(dev);
1198
1199         napi_disable(&lp->napi);
1200
1201         spin_lock_irq(&lp->lock);
1202
1203         amd8111e_disable_interrupt(lp);
1204         amd8111e_stop_chip(lp);
1205
1206         /* Free transmit and receive skbs */
1207         amd8111e_free_skbs(lp->amd8111e_net_dev);
1208
1209         netif_carrier_off(lp->amd8111e_net_dev);
1210
1211         /* Delete ipg timer */
1212         if(lp->options & OPTION_DYN_IPG_ENABLE)
1213                 del_timer_sync(&lp->ipg_data.ipg_timer);
1214
1215         spin_unlock_irq(&lp->lock);
1216         free_irq(dev->irq, dev);
1217         amd8111e_free_ring(lp);
1218
1219         /* Update the statistics before closing */
1220         amd8111e_get_stats(dev);
1221         lp->opened = 0;
1222         return 0;
1223 }
1224
1225 /* This function opens a new interface. It requests an irq for the device,
1226  * initializes the device, buffers and descriptors, and starts the device.
1227  */
1228 static int amd8111e_open(struct net_device * dev )
1229 {
1230         struct amd8111e_priv *lp = netdev_priv(dev);
1231
1232         if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1233                                          dev->name, dev))
1234                 return -EAGAIN;
1235
1236         napi_enable(&lp->napi);
1237
1238         spin_lock_irq(&lp->lock);
1239
1240         amd8111e_init_hw_default(lp);
1241
1242         if(amd8111e_restart(dev)){
1243                 spin_unlock_irq(&lp->lock);
1244                 napi_disable(&lp->napi);
1245                 if (dev->irq)
1246                         free_irq(dev->irq, dev);
1247                 return -ENOMEM;
1248         }
1249         /* Start ipg timer */
1250         if(lp->options & OPTION_DYN_IPG_ENABLE){
1251                 add_timer(&lp->ipg_data.ipg_timer);
1252                 netdev_info(dev, "Dynamic IPG Enabled\n");
1253         }
1254
1255         lp->opened = 1;
1256
1257         spin_unlock_irq(&lp->lock);
1258
1259         netif_start_queue(dev);
1260
1261         return 0;
1262 }
1263
1264 /* This function checks whether any transmit descriptors are
1265  * available to queue more packets.
1266  */
1267 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1268 {
1269         int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1270         if (lp->tx_skbuff[tx_index])
1271                 return -1;
1272         else
1273                 return 0;
1274
1275 }
1276
1277 /* This function will queue the transmit packets to the
1278  * descriptors and will trigger the send operation. It also
1279  * initializes the transmit descriptors with buffer physical address,
1280  * byte count, ownership to hardware etc.
1281  */
1282 static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1283                                        struct net_device * dev)
1284 {
1285         struct amd8111e_priv *lp = netdev_priv(dev);
1286         int tx_index;
1287         unsigned long flags;
1288
1289         spin_lock_irqsave(&lp->lock, flags);
1290
1291         tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1292
1293         lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1294
1295         lp->tx_skbuff[tx_index] = skb;
1296         lp->tx_ring[tx_index].tx_flags = 0;
1297
1298 #if AMD8111E_VLAN_TAG_USED
1299         if (vlan_tx_tag_present(skb)) {
1300                 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1301                                 cpu_to_le16(TCC_VLAN_INSERT);
1302                 lp->tx_ring[tx_index].tag_ctrl_info =
1303                                 cpu_to_le16(vlan_tx_tag_get(skb));
1304
1305         }
1306 #endif
1307         lp->tx_dma_addr[tx_index] =
1308             pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1309         lp->tx_ring[tx_index].buff_phy_addr =
1310             cpu_to_le32(lp->tx_dma_addr[tx_index]);
1311
1312         /*  Set FCS and LTINT bits */
1313         wmb();
1314         lp->tx_ring[tx_index].tx_flags |=
1315             cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1316
1317         lp->tx_idx++;
1318
1319         /* Trigger an immediate send poll. */
1320         writel( VAL1 | TDMD0, lp->mmio + CMD0);
1321         writel( VAL2 | RDMD0,lp->mmio + CMD0);
1322
1323         if(amd8111e_tx_queue_avail(lp) < 0){
1324                 netif_stop_queue(dev);
1325         }
1326         spin_unlock_irqrestore(&lp->lock, flags);
1327         return NETDEV_TX_OK;
1328 }
1329 /* This function reads the necessary memory mapped registers of the device. */
1330 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1331 {
1332         void __iomem *mmio = lp->mmio;
1333         /* Read only necessary registers */
1334         buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1335         buf[1] = readl(mmio + XMT_RING_LEN0);
1336         buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1337         buf[3] = readl(mmio + RCV_RING_LEN0);
1338         buf[4] = readl(mmio + CMD0);
1339         buf[5] = readl(mmio + CMD2);
1340         buf[6] = readl(mmio + CMD3);
1341         buf[7] = readl(mmio + CMD7);
1342         buf[8] = readl(mmio + INT0);
1343         buf[9] = readl(mmio + INTEN0);
1344         buf[10] = readl(mmio + LADRF);
1345         buf[11] = readl(mmio + LADRF+4);
1346         buf[12] = readl(mmio + STAT0);
1347 }
1348
1349
1350 /* This function sets promiscuous mode, all-multi mode or the multicast address
1351  * list to the device.
1352  */
1353 static void amd8111e_set_multicast_list(struct net_device *dev)
1354 {
1355         struct netdev_hw_addr *ha;
1356         struct amd8111e_priv *lp = netdev_priv(dev);
1357         u32 mc_filter[2] ;
1358         int bit_num;
1359
1360         if(dev->flags & IFF_PROMISC){
1361                 writel( VAL2 | PROM, lp->mmio + CMD2);
1362                 return;
1363         }
1364         else
1365                 writel( PROM, lp->mmio + CMD2);
1366         if (dev->flags & IFF_ALLMULTI ||
1367             netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1368                 /* get all multicast packet */
1369                 mc_filter[1] = mc_filter[0] = 0xffffffff;
1370                 lp->options |= OPTION_MULTICAST_ENABLE;
1371                 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1372                 return;
1373         }
1374         if (netdev_mc_empty(dev)) {
1375                 /* get only own packets */
1376                 mc_filter[1] = mc_filter[0] = 0;
1377                 lp->options &= ~OPTION_MULTICAST_ENABLE;
1378                 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1379                 /* disable promiscuous mode */
1380                 writel(PROM, lp->mmio + CMD2);
1381                 return;
1382         }
1383         /* load all the multicast addresses in the logic filter */
1384         lp->options |= OPTION_MULTICAST_ENABLE;
1385         mc_filter[1] = mc_filter[0] = 0;
1386         netdev_for_each_mc_addr(ha, dev) {
1387                 bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1388                 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1389         }
1390         amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1391
1392         /* To eliminate PCI posting bug */
1393         readl(lp->mmio + CMD2);
1394
1395 }
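/* Added note: LADRF is a 64-bit hash filter; each multicast address selects
 * one of its 64 bits via the top six bits of the little-endian CRC-32 of the
 * address, as computed above:
 *
 *     bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;
 *     mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
 */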
1396
1397 static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1398 {
1399         struct amd8111e_priv *lp = netdev_priv(dev);
1400         struct pci_dev *pci_dev = lp->pci_dev;
1401         strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
1402         strlcpy(info->version, MODULE_VERS, sizeof(info->version));
1403         snprintf(info->fw_version, sizeof(info->fw_version),
1404                 "%u", chip_version);
1405         strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1406 }
1407
1408 static int amd8111e_get_regs_len(struct net_device *dev)
1409 {
1410         return AMD8111E_REG_DUMP_LEN;
1411 }
1412
1413 static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1414 {
1415         struct amd8111e_priv *lp = netdev_priv(dev);
1416         regs->version = 0;
1417         amd8111e_read_regs(lp, buf);
1418 }
1419
1420 static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1421 {
1422         struct amd8111e_priv *lp = netdev_priv(dev);
1423         spin_lock_irq(&lp->lock);
1424         mii_ethtool_gset(&lp->mii_if, ecmd);
1425         spin_unlock_irq(&lp->lock);
1426         return 0;
1427 }
1428
1429 static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1430 {
1431         struct amd8111e_priv *lp = netdev_priv(dev);
1432         int res;
1433         spin_lock_irq(&lp->lock);
1434         res = mii_ethtool_sset(&lp->mii_if, ecmd);
1435         spin_unlock_irq(&lp->lock);
1436         return res;
1437 }
1438
1439 static int amd8111e_nway_reset(struct net_device *dev)
1440 {
1441         struct amd8111e_priv *lp = netdev_priv(dev);
1442         return mii_nway_restart(&lp->mii_if);
1443 }
1444
1445 static u32 amd8111e_get_link(struct net_device *dev)
1446 {
1447         struct amd8111e_priv *lp = netdev_priv(dev);
1448         return mii_link_ok(&lp->mii_if);
1449 }
1450
1451 static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1452 {
1453         struct amd8111e_priv *lp = netdev_priv(dev);
1454         wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1455         if (lp->options & OPTION_WOL_ENABLE)
1456                 wol_info->wolopts = WAKE_MAGIC;
1457 }
1458
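     /* Configure Wake-on-LAN from ethtool. Only one wake source is recorded:
      * WAKE_MAGIC takes precedence over WAKE_PHY, and the selection is applied
      * to the hardware on the suspend path.
      */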
1459 static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1460 {
1461         struct amd8111e_priv *lp = netdev_priv(dev);
1462         if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1463                 return -EINVAL;
1464         spin_lock_irq(&lp->lock);
1465         if (wol_info->wolopts & WAKE_MAGIC)
1466                 lp->options |=
1467                         (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1468         else if(wol_info->wolopts & WAKE_PHY)
1469                 lp->options |=
1470                         (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1471         else
1472                 lp->options &= ~OPTION_WOL_ENABLE;
1473         spin_unlock_irq(&lp->lock);
1474         return 0;
1475 }
1476
1477 static const struct ethtool_ops ops = {
1478         .get_drvinfo = amd8111e_get_drvinfo,
1479         .get_regs_len = amd8111e_get_regs_len,
1480         .get_regs = amd8111e_get_regs,
1481         .get_settings = amd8111e_get_settings,
1482         .set_settings = amd8111e_set_settings,
1483         .nway_reset = amd8111e_nway_reset,
1484         .get_link = amd8111e_get_link,
1485         .get_wol = amd8111e_get_wol,
1486         .set_wol = amd8111e_set_wol,
1487 };
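
     /* These callbacks back the standard ethtool requests, for example (the
      * interface name eth0 below is only illustrative):
      *
      *      ethtool -i eth0        - driver information   (get_drvinfo)
      *      ethtool -d eth0        - register dump         (get_regs/get_regs_len)
      *      ethtool -r eth0        - restart autonegotiation (nway_reset)
      *      ethtool -s eth0 wol g  - enable magic-packet wake-up (set_wol)
      */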
1488
1489 /* This function handles the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG and
1490  * SIOCSMIIREG): it reports the external PHY address and reads or writes
1491  * PHY registers on behalf of user space.
1492  */
1493 static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1494 {
1495         struct mii_ioctl_data *data = if_mii(ifr);
1496         struct amd8111e_priv *lp = netdev_priv(dev);
1497         int err;
1498         u32 mii_regval;
1499
1500         switch(cmd) {
1501         case SIOCGMIIPHY:
1502                 data->phy_id = lp->ext_phy_addr;
1503
1504         /* fallthru */
1505         case SIOCGMIIREG:
1506
1507                 spin_lock_irq(&lp->lock);
1508                 err = amd8111e_read_phy(lp, data->phy_id,
1509                         data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1510                 spin_unlock_irq(&lp->lock);
1511
1512                 data->val_out = mii_regval;
1513                 return err;
1514
1515         case SIOCSMIIREG:
1516
1517                 spin_lock_irq(&lp->lock);
1518                 err = amd8111e_write_phy(lp, data->phy_id,
1519                         data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1520                 spin_unlock_irq(&lp->lock);
1521
1522                 return err;
1523
1524         default:
1525                 /* do nothing */
1526                 break;
1527         }
1528         return -EOPNOTSUPP;
1529 }
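
     /* Copy the new MAC address into dev_addr and program it into the PADR
      * registers under the driver lock.
      */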
1530 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1531 {
1532         struct amd8111e_priv *lp = netdev_priv(dev);
1533         int i;
1534         struct sockaddr *addr = p;
1535
1536         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1537         spin_lock_irq(&lp->lock);
1538         /* Setting the MAC address to the device */
1539         for (i = 0; i < ETH_ALEN; i++)
1540                 writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1541
1542         spin_unlock_irq(&lp->lock);
1543
1544         return 0;
1545 }
1546
1547 /* This function changes the MTU of the device. It restarts the device to
1548  * reinitialize the descriptors with receive buffers sized for the new MTU.
1549  */
1550 static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1551 {
1552         struct amd8111e_priv *lp = netdev_priv(dev);
1553         int err;
1554
1555         if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1556                 return -EINVAL;
1557
1558         if (!netif_running(dev)) {
1559                 /* new_mtu will be used
1560                  * the next time the device is brought up
1561                  */
1562                 dev->mtu = new_mtu;
1563                 return 0;
1564         }
1565
1566         spin_lock_irq(&lp->lock);
1567
1568         /* stop the chip */
1569         writel(RUN, lp->mmio + CMD0);
1570
1571         dev->mtu = new_mtu;
1572
1573         err = amd8111e_restart(dev);
1574         spin_unlock_irq(&lp->lock);
1575         if(!err)
1576                 netif_start_queue(dev);
1577         return err;
1578 }
1579
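     /* Arm the controller for magic-packet wake-up; called from the suspend
      * path just before the device is put into D3.
      */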
1580 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1581 {
1582         writel( VAL1|MPPLBA, lp->mmio + CMD3);
1583         writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1584
1585         /* To eliminate PCI posting bug */
1586         readl(lp->mmio + CMD7);
1587         return 0;
1588 }
1589
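     /* Arm the controller to wake the system on a link change; likewise
      * called only from the suspend path.
      */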
1590 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1591 {
1592
1593         /* Adapter is already stopped/suspended/interrupt-disabled */
1594         writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1595
1596         /* To eliminate PCI posting bug */
1597         readl(lp->mmio + CMD7);
1598         return 0;
1599 }
1600
1601 /* This function is called when a packet transmission fails to complete
1602  * within a reasonable period, on the assumption that an interrupt has been
1603  * missed or the interface is locked up. This function will reinitialize
1604  * the hardware.
1605  */
1606 static void amd8111e_tx_timeout(struct net_device *dev)
1607 {
1608         struct amd8111e_priv* lp = netdev_priv(dev);
1609         int err;
1610
1611         netdev_err(dev, "transmit timed out, resetting\n");
1612
1613         spin_lock_irq(&lp->lock);
1614         err = amd8111e_restart(dev);
1615         spin_unlock_irq(&lp->lock);
1616         if(!err)
1617                 netif_wake_queue(dev);
1618 }
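
     /* PCI suspend handler: disable interrupts, detach the interface, stop
      * the chip and, when Wake-on-LAN is enabled, arm the selected wake
      * source before the device is put into D3hot.
      */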
1619 static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
1620 {
1621         struct net_device *dev = pci_get_drvdata(pci_dev);
1622         struct amd8111e_priv *lp = netdev_priv(dev);
1623
1624         if (!netif_running(dev))
1625                 return 0;
1626
1627         /* disable the interrupt */
1628         spin_lock_irq(&lp->lock);
1629         amd8111e_disable_interrupt(lp);
1630         spin_unlock_irq(&lp->lock);
1631
1632         netif_device_detach(dev);
1633
1634         /* stop chip */
1635         spin_lock_irq(&lp->lock);
1636         if (lp->options & OPTION_DYN_IPG_ENABLE)
1637                 del_timer_sync(&lp->ipg_data.ipg_timer);
1638         amd8111e_stop_chip(lp);
1639         spin_unlock_irq(&lp->lock);
1640
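             /* Arm the chosen wake source and let the PCI core enable PME
              * generation from both D3hot and D3cold.
              */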
1641         if (lp->options & OPTION_WOL_ENABLE) {
1642                 /* enable wol */
1643                 if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
1644                         amd8111e_enable_magicpkt(lp);
1645                 if (lp->options & OPTION_WAKE_PHY_ENABLE)
1646                         amd8111e_enable_link_change(lp);
1647
1648                 pci_enable_wake(pci_dev, PCI_D3hot, 1);
1649                 pci_enable_wake(pci_dev, PCI_D3cold, 1);
1650         } else {
1651                 pci_enable_wake(pci_dev, PCI_D3hot, 0);
1652                 pci_enable_wake(pci_dev, PCI_D3cold, 0);
1653         }
1656
1657         pci_save_state(pci_dev);
1658         pci_set_power_state(pci_dev, PCI_D3hot);
1659
1660         return 0;
1661 }
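
     /* PCI resume handler: restore PCI state and power, restart the
      * controller and re-arm the dynamic IPG timer when it is in use.
      */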
1662 static int amd8111e_resume(struct pci_dev *pci_dev)
1663 {
1664         struct net_device *dev = pci_get_drvdata(pci_dev);
1665         struct amd8111e_priv *lp = netdev_priv(dev);
1666
1667         if (!netif_running(dev))
1668                 return 0;
1669
1670         pci_set_power_state(pci_dev, PCI_D0);
1671         pci_restore_state(pci_dev);
1672
1673         pci_enable_wake(pci_dev, PCI_D3hot, 0);
1674         pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
1675
1676         netif_device_attach(dev);
1677
1678         spin_lock_irq(&lp->lock);
1679         amd8111e_restart(dev);
1680         /* Restart ipg timer */
1681         if (lp->options & OPTION_DYN_IPG_ENABLE)
1682                 mod_timer(&lp->ipg_data.ipg_timer,
1683                                 jiffies + IPG_CONVERGE_JIFFIES);
1684         spin_unlock_irq(&lp->lock);
1685
1686         return 0;
1687 }
1688
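     /* Periodic dynamic-IPG tuning, re-armed every IPG_CONVERGE_JIFFIES.
      * On full-duplex links the inter-packet gap is simply left at
      * DEFAULT_IPG.  On half-duplex links, CSTATE sweeps the IPG from
      * MIN_IPG to MAX_IPG in IPG_STEP increments and remembers the value
      * that produced the smallest increase in the transmit collision count;
      * once the sweep completes, that value is kept and the state machine
      * idles in SSTATE for IPG_STABLE_TIME ticks before sweeping again.
      */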
1689 static void amd8111e_config_ipg(struct net_device* dev)
1690 {
1691         struct amd8111e_priv *lp = netdev_priv(dev);
1692         struct ipg_info* ipg_data = &lp->ipg_data;
1693         void __iomem *mmio = lp->mmio;
1694         unsigned int prev_col_cnt = ipg_data->col_cnt;
1695         unsigned int total_col_cnt;
1696         unsigned int tmp_ipg;
1697
1698         if (lp->link_config.duplex == DUPLEX_FULL) {
1699                 ipg_data->ipg = DEFAULT_IPG;
1700                 return;
1701         }
1702
1703         if (ipg_data->ipg_state == SSTATE) {
1704                 if (ipg_data->timer_tick == IPG_STABLE_TIME) {
1705                         ipg_data->timer_tick = 0;
1706                         ipg_data->ipg = MIN_IPG - IPG_STEP;
1707                         ipg_data->current_ipg = MIN_IPG;
1708                         ipg_data->diff_col_cnt = 0xFFFFFFFF;
1709                         ipg_data->ipg_state = CSTATE;
1710                 } else {
1711                         ipg_data->timer_tick++;
1712                 }
1713         }
1714
1715         if (ipg_data->ipg_state == CSTATE) {
1716                 /* Get the current collision count */
1717                 total_col_cnt = ipg_data->col_cnt =
1718                                 amd8111e_read_mib(mmio, xmt_collisions);
1719
1720                 if ((total_col_cnt - prev_col_cnt) <
1721                     ipg_data->diff_col_cnt) {
1722                         ipg_data->diff_col_cnt =
1723                                 total_col_cnt - prev_col_cnt;
1724                         ipg_data->ipg = ipg_data->current_ipg;
1725                 }
1726
1727                 ipg_data->current_ipg += IPG_STEP;
1728
1729                 if (ipg_data->current_ipg <= MAX_IPG) {
1730                         tmp_ipg = ipg_data->current_ipg;
1731                 } else {
1732                         tmp_ipg = ipg_data->ipg;
1733                         ipg_data->ipg_state = SSTATE;
1734                 }
1735                 writew((u32)tmp_ipg, mmio + IPG);
1736                 writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1737         }
1738         mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1739 }
1748
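     /* Scan for an external PHY at addresses 0x1e down to 0 and record the
      * ID and address of the first one that answers both PHYSID registers;
      * if none responds, fall back to address 1 with a zero ID.
      */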
1749 static void amd8111e_probe_ext_phy(struct net_device *dev)
1750 {
1751         struct amd8111e_priv *lp = netdev_priv(dev);
1752         int i;
1753
1754         for (i = 0x1e; i >= 0; i--) {
1755                 u32 id1, id2;
1756
1757                 if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1758                         continue;
1759                 if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1760                         continue;
1761                 lp->ext_phy_id = (id1 << 16) | id2;
1762                 lp->ext_phy_addr = i;
1763                 return;
1764         }
1765         lp->ext_phy_id = 0;
1766         lp->ext_phy_addr = 1;
1767 }
1768
1769 static const struct net_device_ops amd8111e_netdev_ops = {
1770         .ndo_open               = amd8111e_open,
1771         .ndo_stop               = amd8111e_close,
1772         .ndo_start_xmit         = amd8111e_start_xmit,
1773         .ndo_tx_timeout         = amd8111e_tx_timeout,
1774         .ndo_get_stats          = amd8111e_get_stats,
1775         .ndo_set_rx_mode        = amd8111e_set_multicast_list,
1776         .ndo_validate_addr      = eth_validate_addr,
1777         .ndo_set_mac_address    = amd8111e_set_mac_address,
1778         .ndo_do_ioctl           = amd8111e_ioctl,
1779         .ndo_change_mtu         = amd8111e_change_mtu,
1780 #ifdef CONFIG_NET_POLL_CONTROLLER
1781         .ndo_poll_controller     = amd8111e_poll,
1782 #endif
1783 };
1784
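     /* One-time PCI probe: enable the device, map BAR 0, read the station
      * address from the PADR registers, apply the per-card module parameters
      * (speed_duplex, coalesce, dynamic_ipg) and register the net_device.
      */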
1785 static int amd8111e_probe_one(struct pci_dev *pdev,
1786                                   const struct pci_device_id *ent)
1787 {
1788         int err, i;
1789         unsigned long reg_addr, reg_len;
1790         struct amd8111e_priv *lp;
1791         struct net_device *dev;
1792
1793         err = pci_enable_device(pdev);
1794         if (err) {
1795                 dev_err(&pdev->dev, "Cannot enable new PCI device\n");
1796                 return err;
1797         }
1798
1799         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1800                 dev_err(&pdev->dev, "Cannot find PCI base address\n");
1801                 err = -ENODEV;
1802                 goto err_disable_pdev;
1803         }
1804
1805         err = pci_request_regions(pdev, MODULE_NAME);
1806         if (err) {
1807                 dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
1808                 goto err_disable_pdev;
1809         }
1810
1811         pci_set_master(pdev);
1812
1813         /* Find power-management capability. */
1814         if (!pdev->pm_cap) {
1815                 dev_err(&pdev->dev, "No Power Management capability\n");
1816                 err = -ENODEV;
1817                 goto err_free_reg;
1818         }
1819
1820         /* Initialize DMA */
1821         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1822                 dev_err(&pdev->dev, "DMA not supported\n");
1823                 err = -ENODEV;
1824                 goto err_free_reg;
1825         }
1826
1827         reg_addr = pci_resource_start(pdev, 0);
1828         reg_len = pci_resource_len(pdev, 0);
1829
1830         dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1831         if (!dev) {
1832                 err = -ENOMEM;
1833                 goto err_free_reg;
1834         }
1835
1836         SET_NETDEV_DEV(dev, &pdev->dev);
1837
1838 #if AMD8111E_VLAN_TAG_USED
1839         dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
1840 #endif
1841
1842         lp = netdev_priv(dev);
1843         lp->pci_dev = pdev;
1844         lp->amd8111e_net_dev = dev;
1845         lp->pm_cap = pdev->pm_cap;
1846
1847         spin_lock_init(&lp->lock);
1848
1849         lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
1850         if (!lp->mmio) {
1851                 dev_err(&pdev->dev, "Cannot map device registers\n");
1852                 err = -ENOMEM;
1853                 goto err_free_dev;
1854         }
1855
1856         /* Initializing MAC address */
1857         for (i = 0; i < ETH_ALEN; i++)
1858                 dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1859
1860         /* Set user-defined parameters */
1861         lp->ext_phy_option = speed_duplex[card_idx];
1862         if (coalesce[card_idx])
1863                 lp->options |= OPTION_INTR_COAL_ENABLE;
1864         if (dynamic_ipg[card_idx++])
1865                 lp->options |= OPTION_DYN_IPG_ENABLE;
1866
1867
1868         /* Initialize driver entry points */
1869         dev->netdev_ops = &amd8111e_netdev_ops;
1870         dev->ethtool_ops = &ops;
1871         dev->irq = pdev->irq;
1872         dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1873         netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1874
1878         /* Probe the external PHY */
1879         amd8111e_probe_ext_phy(dev);
1880
1881         /* setting mii default values */
1882         lp->mii_if.dev = dev;
1883         lp->mii_if.mdio_read = amd8111e_mdio_read;
1884         lp->mii_if.mdio_write = amd8111e_mdio_write;
1885         lp->mii_if.phy_id = lp->ext_phy_addr;
1886
1887         /* Set receive buffer length and set jumbo option*/
1888         amd8111e_set_rx_buff_len(dev);
1889
1890
1891         err = register_netdev(dev);
1892         if (err) {
1893                 dev_err(&pdev->dev, "Cannot register net device\n");
1894                 goto err_free_dev;
1895         }
1896
1897         pci_set_drvdata(pdev, dev);
1898
1899         /* Initialize software ipg timer */
1900         if (lp->options & OPTION_DYN_IPG_ENABLE) {
1901                 init_timer(&lp->ipg_data.ipg_timer);
1902                 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1903                 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1904                 lp->ipg_data.ipg_timer.expires = jiffies +
1905                                                  IPG_CONVERGE_JIFFIES;
1906                 lp->ipg_data.ipg = DEFAULT_IPG;
1907                 lp->ipg_data.ipg_state = CSTATE;
1908         }
1909
1910         /*  display driver and device information */
1911         chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
1912         dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
1913         dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1914                  chip_version, dev->dev_addr);
1915         if (lp->ext_phy_id)
1916                 dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
1917                          lp->ext_phy_id, lp->ext_phy_addr);
1918         else
1919                 dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
1920
1921         return 0;
1922
1923 err_free_dev:
1924         free_netdev(dev);
1925
1926 err_free_reg:
1927         pci_release_regions(pdev);
1928
1929 err_disable_pdev:
1930         pci_disable_device(pdev);
1931         return err;
1932
1933 }
1934
1935 static void amd8111e_remove_one(struct pci_dev *pdev)
1936 {
1937         struct net_device *dev = pci_get_drvdata(pdev);
1938
1939         if (dev) {
1940                 unregister_netdev(dev);
1941                 free_netdev(dev);
1942                 pci_release_regions(pdev);
1943                 pci_disable_device(pdev);
1944         }
1945 }
1946
1947 static const struct pci_device_id amd8111e_pci_tbl[] = {
1948         {
1949          .vendor = PCI_VENDOR_ID_AMD,
1950          .device = PCI_DEVICE_ID_AMD8111E_7462,
1951         },
1952         {
1953          .vendor = 0,
1954         }
1955 };
1956 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
1957
1958 static struct pci_driver amd8111e_driver = {
1959         .name           = MODULE_NAME,
1960         .id_table       = amd8111e_pci_tbl,
1961         .probe          = amd8111e_probe_one,
1962         .remove         = amd8111e_remove_one,
1963         .suspend        = amd8111e_suspend,
1964         .resume         = amd8111e_resume
1965 };
1966
1967 module_pci_driver(amd8111e_driver);