]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/qlogic/qlge/qlge_main.c
net: introduce and use netdev_features_t for device features sets
[karo-tx-linux.git] / drivers / net / ethernet / qlogic / qlge / qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow force of firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore causes exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialize
163  * process, but is also used in kernel thread API such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!.\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 return status;
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
260
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378
379                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380                                      "Adding %s address %pM at index %d in the CAM.\n",
381                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
382                                      "MULTICAST" : "UNICAST",
383                                      addr, index);
384
385                         status =
386                             ql_wait_reg_rdy(qdev,
387                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388                         if (status)
389                                 goto exit;
390                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
392                                    type);       /* type */
393                         ql_write32(qdev, MAC_ADDR_DATA, lower);
394                         status =
395                             ql_wait_reg_rdy(qdev,
396                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397                         if (status)
398                                 goto exit;
399                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
401                                    type);       /* type */
402                         ql_write32(qdev, MAC_ADDR_DATA, upper);
403                         status =
404                             ql_wait_reg_rdy(qdev,
405                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406                         if (status)
407                                 goto exit;
408                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
409                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
410                                    type);       /* type */
411                         /* This field should also include the queue id
412                            and possibly the function id.  Right now we hardcode
413                            the route field to NIC core.
414                          */
415                         cam_output = (CAM_OUT_ROUTE_NIC |
416                                       (qdev->
417                                        func << CAM_OUT_FUNC_SHIFT) |
418                                         (0 << CAM_OUT_CQ_ID_SHIFT));
419                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420                                 cam_output |= CAM_OUT_RV;
421                         /* route to NIC core */
422                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423                         break;
424                 }
425         case MAC_ADDR_TYPE_VLAN:
426                 {
427                         u32 enable_bit = *((u32 *) &addr[0]);
428                         /* For VLAN, the addr actually holds a bit that
429                          * either enables or disables the vlan id we are
430                          * addressing. It's either MAC_ADDR_E on or off.
431                          * That's bit-27 we're talking about.
432                          */
433                         netif_info(qdev, ifup, qdev->ndev,
434                                    "%s VLAN ID %d %s the CAM.\n",
435                                    enable_bit ? "Adding" : "Removing",
436                                    index,
437                                    enable_bit ? "to" : "from");
438
439                         status =
440                             ql_wait_reg_rdy(qdev,
441                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442                         if (status)
443                                 goto exit;
444                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
446                                    type |       /* type */
447                                    enable_bit); /* enable/disable */
448                         break;
449                 }
450         case MAC_ADDR_TYPE_MULTI_FLTR:
451         default:
452                 netif_crit(qdev, ifup, qdev->ndev,
453                            "Address type %d not yet supported.\n", type);
454                 status = -EPERM;
455         }
456 exit:
457         return status;
458 }
459
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing
462  * especially in a bonding environment.
463  */
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466         int status;
467         char zero_mac_addr[ETH_ALEN];
468         char *addr;
469
470         if (set) {
471                 addr = &qdev->current_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Set Mac addr %pM\n", addr);
474         } else {
475                 memset(zero_mac_addr, 0, ETH_ALEN);
476                 addr = &zero_mac_addr[0];
477                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478                              "Clearing MAC address\n");
479         }
480         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 return status;
483         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486         if (status)
487                 netif_err(qdev, ifup, qdev->ndev,
488                           "Failed to init mac address.\n");
489         return status;
490 }
491
492 void ql_link_on(struct ql_adapter *qdev)
493 {
494         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495         netif_carrier_on(qdev->ndev);
496         ql_set_mac_addr(qdev, 1);
497 }
498
499 void ql_link_off(struct ql_adapter *qdev)
500 {
501         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502         netif_carrier_off(qdev->ndev);
503         ql_set_mac_addr(qdev, 0);
504 }
505
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511         int status = 0;
512
513         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514         if (status)
515                 goto exit;
516
517         ql_write32(qdev, RT_IDX,
518                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520         if (status)
521                 goto exit;
522         *value = ql_read32(qdev, RT_DATA);
523 exit:
524         return status;
525 }
526
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533                               int enable)
534 {
535         int status = -EINVAL; /* Return error if no mask match. */
536         u32 value = 0;
537
538         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539                      "%s %s mask %s the routing reg.\n",
540                      enable ? "Adding" : "Removing",
541                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
555                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
556                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557                      "(Bad index != RT_IDX)",
558                      enable ? "to" : "from");
559
560         switch (mask) {
561         case RT_IDX_CAM_HIT:
562                 {
563                         value = RT_IDX_DST_CAM_Q |      /* dest */
564                             RT_IDX_TYPE_NICQ |  /* type */
565                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566                         break;
567                 }
568         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
569                 {
570                         value = RT_IDX_DST_DFLT_Q |     /* dest */
571                             RT_IDX_TYPE_NICQ |  /* type */
572                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573                         break;
574                 }
575         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
576                 {
577                         value = RT_IDX_DST_DFLT_Q |     /* dest */
578                             RT_IDX_TYPE_NICQ |  /* type */
579                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580                         break;
581                 }
582         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583                 {
584                         value = RT_IDX_DST_DFLT_Q | /* dest */
585                                 RT_IDX_TYPE_NICQ | /* type */
586                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
587                                 RT_IDX_IDX_SHIFT); /* index */
588                         break;
589                 }
590         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q | /* dest */
593                                 RT_IDX_TYPE_NICQ | /* type */
594                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595                                 RT_IDX_IDX_SHIFT); /* index */
596                         break;
597                 }
598         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
599                 {
600                         value = RT_IDX_DST_DFLT_Q |     /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
613                 {
614                         value = RT_IDX_DST_DFLT_Q |     /* dest */
615                             RT_IDX_TYPE_NICQ |  /* type */
616                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617                         break;
618                 }
619         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
620                 {
621                         value = RT_IDX_DST_RSS |        /* dest */
622                             RT_IDX_TYPE_NICQ |  /* type */
623                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624                         break;
625                 }
626         case 0:         /* Clear the E-bit on an entry. */
627                 {
628                         value = RT_IDX_DST_DFLT_Q |     /* dest */
629                             RT_IDX_TYPE_NICQ |  /* type */
630                             (index << RT_IDX_IDX_SHIFT);/* index */
631                         break;
632                 }
633         default:
634                 netif_err(qdev, ifup, qdev->ndev,
635                           "Mask type %d not yet supported.\n", mask);
636                 status = -EPERM;
637                 goto exit;
638         }
639
640         if (value) {
641                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642                 if (status)
643                         goto exit;
644                 value |= (enable ? RT_IDX_E : 0);
645                 ql_write32(qdev, RT_IDX, value);
646                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647         }
648 exit:
649         return status;
650 }
651
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670         u32 var = 0;
671         unsigned long hw_flags = 0;
672         struct intr_context *ctx = qdev->intr_context + intr;
673
674         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675                 /* Always enable if we're MSIX multi interrupts and
676                  * it's not the default (zeroeth) interrupt.
677                  */
678                 ql_write32(qdev, INTR_EN,
679                            ctx->intr_en_mask);
680                 var = ql_read32(qdev, STS);
681                 return var;
682         }
683
684         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685         if (atomic_dec_and_test(&ctx->irq_cnt)) {
686                 ql_write32(qdev, INTR_EN,
687                            ctx->intr_en_mask);
688                 var = ql_read32(qdev, STS);
689         }
690         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691         return var;
692 }
693
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696         u32 var = 0;
697         struct intr_context *ctx;
698
699         /* HW disables for us if we're MSIX multi interrupts and
700          * it's not the default (zeroeth) interrupt.
701          */
702         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703                 return 0;
704
705         ctx = qdev->intr_context + intr;
706         spin_lock(&qdev->hw_lock);
707         if (!atomic_read(&ctx->irq_cnt)) {
708                 ql_write32(qdev, INTR_EN,
709                 ctx->intr_dis_mask);
710                 var = ql_read32(qdev, STS);
711         }
712         atomic_inc(&ctx->irq_cnt);
713         spin_unlock(&qdev->hw_lock);
714         return var;
715 }
716
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719         int i;
720         for (i = 0; i < qdev->intr_count; i++) {
721                 /* The enable call does a atomic_dec_and_test
722                  * and enables only if the result is zero.
723                  * So we precharge it here.
724                  */
725                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726                         i == 0))
727                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728                 ql_enable_completion_interrupt(qdev, i);
729         }
730
731 }
732
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735         int status, i;
736         u16 csum = 0;
737         __le16 *flash = (__le16 *)&qdev->flash;
738
739         status = strncmp((char *)&qdev->flash, str, 4);
740         if (status) {
741                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742                 return  status;
743         }
744
745         for (i = 0; i < size; i++)
746                 csum += le16_to_cpu(*flash++);
747
748         if (csum)
749                 netif_err(qdev, ifup, qdev->ndev,
750                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752         return csum;
753 }
754
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757         int status = 0;
758         /* wait for reg to come ready */
759         status = ql_wait_reg_rdy(qdev,
760                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761         if (status)
762                 goto exit;
763         /* set up for reg read */
764         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765         /* wait for reg to come ready */
766         status = ql_wait_reg_rdy(qdev,
767                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768         if (status)
769                 goto exit;
770          /* This data is stored on flash as an array of
771          * __le32.  Since ql_read32() returns cpu endian
772          * we need to swap it back.
773          */
774         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776         return status;
777 }
778
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781         u32 i, size;
782         int status;
783         __le32 *p = (__le32 *)&qdev->flash;
784         u32 offset;
785         u8 mac_addr[6];
786
787         /* Get flash offset for function and adjust
788          * for dword access.
789          */
790         if (!qdev->port)
791                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792         else
793                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796                 return -ETIMEDOUT;
797
798         size = sizeof(struct flash_params_8000) / sizeof(u32);
799         for (i = 0; i < size; i++, p++) {
800                 status = ql_read_flash_word(qdev, i+offset, p);
801                 if (status) {
802                         netif_err(qdev, ifup, qdev->ndev,
803                                   "Error reading flash.\n");
804                         goto exit;
805                 }
806         }
807
808         status = ql_validate_flash(qdev,
809                         sizeof(struct flash_params_8000) / sizeof(u16),
810                         "8000");
811         if (status) {
812                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813                 status = -EINVAL;
814                 goto exit;
815         }
816
817         /* Extract either manufacturer or BOFM modified
818          * MAC address.
819          */
820         if (qdev->flash.flash_params_8000.data_type1 == 2)
821                 memcpy(mac_addr,
822                         qdev->flash.flash_params_8000.mac_addr1,
823                         qdev->ndev->addr_len);
824         else
825                 memcpy(mac_addr,
826                         qdev->flash.flash_params_8000.mac_addr,
827                         qdev->ndev->addr_len);
828
829         if (!is_valid_ether_addr(mac_addr)) {
830                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831                 status = -EINVAL;
832                 goto exit;
833         }
834
835         memcpy(qdev->ndev->dev_addr,
836                 mac_addr,
837                 qdev->ndev->addr_len);
838
839 exit:
840         ql_sem_unlock(qdev, SEM_FLASH_MASK);
841         return status;
842 }
843
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846         int i;
847         int status;
848         __le32 *p = (__le32 *)&qdev->flash;
849         u32 offset = 0;
850         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852         /* Second function's parameters follow the first
853          * function's.
854          */
855         if (qdev->port)
856                 offset = size;
857
858         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859                 return -ETIMEDOUT;
860
861         for (i = 0; i < size; i++, p++) {
862                 status = ql_read_flash_word(qdev, i+offset, p);
863                 if (status) {
864                         netif_err(qdev, ifup, qdev->ndev,
865                                   "Error reading flash.\n");
866                         goto exit;
867                 }
868
869         }
870
871         status = ql_validate_flash(qdev,
872                         sizeof(struct flash_params_8012) / sizeof(u16),
873                         "8012");
874         if (status) {
875                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881                 status = -EINVAL;
882                 goto exit;
883         }
884
885         memcpy(qdev->ndev->dev_addr,
886                 qdev->flash.flash_params_8012.mac_addr,
887                 qdev->ndev->addr_len);
888
889 exit:
890         ql_sem_unlock(qdev, SEM_FLASH_MASK);
891         return status;
892 }
893
894 /* xgmac register are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900         int status;
901         /* wait for reg to come ready */
902         status = ql_wait_reg_rdy(qdev,
903                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904         if (status)
905                 return status;
906         /* write the data to the data reg */
907         ql_write32(qdev, XGMAC_DATA, data);
908         /* trigger the write */
909         ql_write32(qdev, XGMAC_ADDR, reg);
910         return status;
911 }
912
913 /* xgmac register are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919         int status = 0;
920         /* wait for reg to come ready */
921         status = ql_wait_reg_rdy(qdev,
922                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923         if (status)
924                 goto exit;
925         /* set up for reg read */
926         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927         /* wait for reg to come ready */
928         status = ql_wait_reg_rdy(qdev,
929                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930         if (status)
931                 goto exit;
932         /* get the data */
933         *data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935         return status;
936 }
937
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941         int status = 0;
942         u32 hi = 0;
943         u32 lo = 0;
944
945         status = ql_read_xgmac_reg(qdev, reg, &lo);
946         if (status)
947                 goto exit;
948
949         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950         if (status)
951                 goto exit;
952
953         *data = (u64) lo | ((u64) hi << 32);
954
955 exit:
956         return status;
957 }
958
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961         int status;
962         /*
963          * Get MPI firmware version for driver banner
964          * and ethool info.
965          */
966         status = ql_mb_about_fw(qdev);
967         if (status)
968                 goto exit;
969         status = ql_mb_get_fw_state(qdev);
970         if (status)
971                 goto exit;
972         /* Wake up a worker to get/set the TX/RX frame sizes. */
973         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975         return status;
976 }
977
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986         int status = 0;
987         u32 data;
988
989         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990                 /* Another function has the semaphore, so
991                  * wait for the port init bit to come ready.
992                  */
993                 netif_info(qdev, link, qdev->ndev,
994                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996                 if (status) {
997                         netif_crit(qdev, link, qdev->ndev,
998                                    "Port initialize timed out.\n");
999                 }
1000                 return status;
1001         }
1002
1003         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1004         /* Set the core reset. */
1005         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006         if (status)
1007                 goto end;
1008         data |= GLOBAL_CFG_RESET;
1009         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010         if (status)
1011                 goto end;
1012
1013         /* Clear the core reset and turn on jumbo for receiver. */
1014         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1015         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1016         data |= GLOBAL_CFG_TX_STAT_EN;
1017         data |= GLOBAL_CFG_RX_STAT_EN;
1018         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019         if (status)
1020                 goto end;
1021
1022         /* Enable transmitter, and clear it's reset. */
1023         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024         if (status)
1025                 goto end;
1026         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1027         data |= TX_CFG_EN;      /* Enable the transmitter. */
1028         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029         if (status)
1030                 goto end;
1031
1032         /* Enable receiver and clear it's reset. */
1033         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034         if (status)
1035                 goto end;
1036         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1037         data |= RX_CFG_EN;      /* Enable the receiver. */
1038         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039         if (status)
1040                 goto end;
1041
1042         /* Turn on jumbo. */
1043         status =
1044             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045         if (status)
1046                 goto end;
1047         status =
1048             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049         if (status)
1050                 goto end;
1051
1052         /* Signal to the world that the port is enabled.        */
1053         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056         return status;
1057 }
1058
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061         return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
1063
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068         rx_ring->lbq_curr_idx++;
1069         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070                 rx_ring->lbq_curr_idx = 0;
1071         rx_ring->lbq_free_cnt++;
1072         return lbq_desc;
1073 }
1074
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076                 struct rx_ring *rx_ring)
1077 {
1078         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080         pci_dma_sync_single_for_cpu(qdev->pdev,
1081                                         dma_unmap_addr(lbq_desc, mapaddr),
1082                                     rx_ring->lbq_buf_size,
1083                                         PCI_DMA_FROMDEVICE);
1084
1085         /* If it's the last chunk of our master page then
1086          * we unmap it.
1087          */
1088         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089                                         == ql_lbq_block_size(qdev))
1090                 pci_unmap_page(qdev->pdev,
1091                                 lbq_desc->p.pg_chunk.map,
1092                                 ql_lbq_block_size(qdev),
1093                                 PCI_DMA_FROMDEVICE);
1094         return lbq_desc;
1095 }
1096
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101         rx_ring->sbq_curr_idx++;
1102         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103                 rx_ring->sbq_curr_idx = 0;
1104         rx_ring->sbq_free_cnt++;
1105         return sbq_desc;
1106 }
1107
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111         rx_ring->cnsmr_idx++;
1112         rx_ring->curr_entry++;
1113         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114                 rx_ring->cnsmr_idx = 0;
1115                 rx_ring->curr_entry = rx_ring->cq_base;
1116         }
1117 }
1118
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
1123
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125                                                 struct bq_desc *lbq_desc)
1126 {
1127         if (!rx_ring->pg_chunk.page) {
1128                 u64 map;
1129                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130                                                 GFP_ATOMIC,
1131                                                 qdev->lbq_buf_order);
1132                 if (unlikely(!rx_ring->pg_chunk.page)) {
1133                         netif_err(qdev, drv, qdev->ndev,
1134                                   "page allocation failed.\n");
1135                         return -ENOMEM;
1136                 }
1137                 rx_ring->pg_chunk.offset = 0;
1138                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139                                         0, ql_lbq_block_size(qdev),
1140                                         PCI_DMA_FROMDEVICE);
1141                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142                         __free_pages(rx_ring->pg_chunk.page,
1143                                         qdev->lbq_buf_order);
1144                         netif_err(qdev, drv, qdev->ndev,
1145                                   "PCI mapping failed.\n");
1146                         return -ENOMEM;
1147                 }
1148                 rx_ring->pg_chunk.map = map;
1149                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150         }
1151
1152         /* Copy the current master pg_chunk info
1153          * to the current descriptor.
1154          */
1155         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157         /* Adjust the master page chunk for next
1158          * buffer get.
1159          */
1160         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162                 rx_ring->pg_chunk.page = NULL;
1163                 lbq_desc->p.pg_chunk.last_flag = 1;
1164         } else {
1165                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166                 get_page(rx_ring->pg_chunk.page);
1167                 lbq_desc->p.pg_chunk.last_flag = 0;
1168         }
1169         return 0;
1170 }
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174         u32 clean_idx = rx_ring->lbq_clean_idx;
1175         u32 start_idx = clean_idx;
1176         struct bq_desc *lbq_desc;
1177         u64 map;
1178         int i;
1179
1180         while (rx_ring->lbq_free_cnt > 32) {
1181                 for (i = 0; i < 16; i++) {
1182                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                                      "lbq: try cleaning clean_idx = %d.\n",
1184                                      clean_idx);
1185                         lbq_desc = &rx_ring->lbq[clean_idx];
1186                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187                                 netif_err(qdev, ifup, qdev->ndev,
1188                                           "Could not get a page chunk.\n");
1189                                 return;
1190                         }
1191
1192                         map = lbq_desc->p.pg_chunk.map +
1193                                 lbq_desc->p.pg_chunk.offset;
1194                                 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195                         dma_unmap_len_set(lbq_desc, maplen,
1196                                         rx_ring->lbq_buf_size);
1197                                 *lbq_desc->addr = cpu_to_le64(map);
1198
1199                         pci_dma_sync_single_for_device(qdev->pdev, map,
1200                                                 rx_ring->lbq_buf_size,
1201                                                 PCI_DMA_FROMDEVICE);
1202                         clean_idx++;
1203                         if (clean_idx == rx_ring->lbq_len)
1204                                 clean_idx = 0;
1205                 }
1206
1207                 rx_ring->lbq_clean_idx = clean_idx;
1208                 rx_ring->lbq_prod_idx += 16;
1209                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210                         rx_ring->lbq_prod_idx = 0;
1211                 rx_ring->lbq_free_cnt -= 16;
1212         }
1213
1214         if (start_idx != clean_idx) {
1215                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216                              "lbq: updating prod idx = %d.\n",
1217                              rx_ring->lbq_prod_idx);
1218                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219                                 rx_ring->lbq_prod_idx_db_reg);
1220         }
1221 }
1222
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226         u32 clean_idx = rx_ring->sbq_clean_idx;
1227         u32 start_idx = clean_idx;
1228         struct bq_desc *sbq_desc;
1229         u64 map;
1230         int i;
1231
1232         while (rx_ring->sbq_free_cnt > 16) {
1233                 for (i = 0; i < 16; i++) {
1234                         sbq_desc = &rx_ring->sbq[clean_idx];
1235                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236                                      "sbq: try cleaning clean_idx = %d.\n",
1237                                      clean_idx);
1238                         if (sbq_desc->p.skb == NULL) {
1239                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1240                                              qdev->ndev,
1241                                              "sbq: getting new skb for index %d.\n",
1242                                              sbq_desc->index);
1243                                 sbq_desc->p.skb =
1244                                     netdev_alloc_skb(qdev->ndev,
1245                                                      SMALL_BUFFER_SIZE);
1246                                 if (sbq_desc->p.skb == NULL) {
1247                                         netif_err(qdev, probe, qdev->ndev,
1248                                                   "Couldn't get an skb.\n");
1249                                         rx_ring->sbq_clean_idx = clean_idx;
1250                                         return;
1251                                 }
1252                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253                                 map = pci_map_single(qdev->pdev,
1254                                                      sbq_desc->p.skb->data,
1255                                                      rx_ring->sbq_buf_size,
1256                                                      PCI_DMA_FROMDEVICE);
1257                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258                                         netif_err(qdev, ifup, qdev->ndev,
1259                                                   "PCI mapping failed.\n");
1260                                         rx_ring->sbq_clean_idx = clean_idx;
1261                                         dev_kfree_skb_any(sbq_desc->p.skb);
1262                                         sbq_desc->p.skb = NULL;
1263                                         return;
1264                                 }
1265                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266                                 dma_unmap_len_set(sbq_desc, maplen,
1267                                                   rx_ring->sbq_buf_size);
1268                                 *sbq_desc->addr = cpu_to_le64(map);
1269                         }
1270
1271                         clean_idx++;
1272                         if (clean_idx == rx_ring->sbq_len)
1273                                 clean_idx = 0;
1274                 }
1275                 rx_ring->sbq_clean_idx = clean_idx;
1276                 rx_ring->sbq_prod_idx += 16;
1277                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278                         rx_ring->sbq_prod_idx = 0;
1279                 rx_ring->sbq_free_cnt -= 16;
1280         }
1281
1282         if (start_idx != clean_idx) {
1283                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284                              "sbq: updating prod idx = %d.\n",
1285                              rx_ring->sbq_prod_idx);
1286                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287                                 rx_ring->sbq_prod_idx_db_reg);
1288         }
1289 }
1290
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292                                     struct rx_ring *rx_ring)
1293 {
1294         ql_update_sbq(qdev, rx_ring);
1295         ql_update_lbq(qdev, rx_ring);
1296 }
1297
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302                           struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304         int i;
1305         for (i = 0; i < mapped; i++) {
1306                 if (i == 0 || (i == 7 && mapped > 7)) {
1307                         /*
1308                          * Unmap the skb->data area, or the
1309                          * external sglist (AKA the Outbound
1310                          * Address List (OAL)).
1311                          * If its the zeroeth element, then it's
1312                          * the skb->data area.  If it's the 7th
1313                          * element and there is more than 6 frags,
1314                          * then its an OAL.
1315                          */
1316                         if (i == 7) {
1317                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1318                                              qdev->ndev,
1319                                              "unmapping OAL area.\n");
1320                         }
1321                         pci_unmap_single(qdev->pdev,
1322                                          dma_unmap_addr(&tx_ring_desc->map[i],
1323                                                         mapaddr),
1324                                          dma_unmap_len(&tx_ring_desc->map[i],
1325                                                        maplen),
1326                                          PCI_DMA_TODEVICE);
1327                 } else {
1328                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329                                      "unmapping frag %d.\n", i);
1330                         pci_unmap_page(qdev->pdev,
1331                                        dma_unmap_addr(&tx_ring_desc->map[i],
1332                                                       mapaddr),
1333                                        dma_unmap_len(&tx_ring_desc->map[i],
1334                                                      maplen), PCI_DMA_TODEVICE);
1335                 }
1336         }
1337
1338 }
1339
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
1343 static int ql_map_send(struct ql_adapter *qdev,
1344                        struct ob_mac_iocb_req *mac_iocb_ptr,
1345                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347         int len = skb_headlen(skb);
1348         dma_addr_t map;
1349         int frag_idx, err, map_idx = 0;
1350         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351         int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353         if (frag_cnt) {
1354                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355                              "frag_cnt = %d.\n", frag_cnt);
1356         }
1357         /*
1358          * Map the skb buffer first.
1359          */
1360         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362         err = pci_dma_mapping_error(qdev->pdev, map);
1363         if (err) {
1364                 netif_err(qdev, tx_queued, qdev->ndev,
1365                           "PCI mapping failed with error: %d\n", err);
1366
1367                 return NETDEV_TX_BUSY;
1368         }
1369
1370         tbd->len = cpu_to_le32(len);
1371         tbd->addr = cpu_to_le64(map);
1372         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374         map_idx++;
1375
1376         /*
1377          * This loop fills the remainder of the 8 address descriptors
1378          * in the IOCB.  If there are more than 7 fragments, then the
1379          * eighth address desc will point to an external list (OAL).
1380          * When this happens, the remainder of the frags will be stored
1381          * in this list.
1382          */
1383         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385                 tbd++;
1386                 if (frag_idx == 6 && frag_cnt > 7) {
1387                         /* Let's tack on an sglist.
1388                          * Our control block will now
1389                          * look like this:
1390                          * iocb->seg[0] = skb->data
1391                          * iocb->seg[1] = frag[0]
1392                          * iocb->seg[2] = frag[1]
1393                          * iocb->seg[3] = frag[2]
1394                          * iocb->seg[4] = frag[3]
1395                          * iocb->seg[5] = frag[4]
1396                          * iocb->seg[6] = frag[5]
1397                          * iocb->seg[7] = ptr to OAL (external sglist)
1398                          * oal->seg[0] = frag[6]
1399                          * oal->seg[1] = frag[7]
1400                          * oal->seg[2] = frag[8]
1401                          * oal->seg[3] = frag[9]
1402                          * oal->seg[4] = frag[10]
1403                          *      etc...
1404                          */
1405                         /* Tack on the OAL in the eighth segment of IOCB. */
1406                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407                                              sizeof(struct oal),
1408                                              PCI_DMA_TODEVICE);
1409                         err = pci_dma_mapping_error(qdev->pdev, map);
1410                         if (err) {
1411                                 netif_err(qdev, tx_queued, qdev->ndev,
1412                                           "PCI mapping outbound address list with error: %d\n",
1413                                           err);
1414                                 goto map_error;
1415                         }
1416
1417                         tbd->addr = cpu_to_le64(map);
1418                         /*
1419                          * The length is the number of fragments
1420                          * that remain to be mapped times the length
1421                          * of our sglist (OAL).
1422                          */
1423                         tbd->len =
1424                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1425                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1426                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427                                            map);
1428                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429                                           sizeof(struct oal));
1430                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431                         map_idx++;
1432                 }
1433
1434                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1435                                        DMA_TO_DEVICE);
1436
1437                 err = dma_mapping_error(&qdev->pdev->dev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   skb_frag_size(frag));
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * If the first frag mapping failed, then i will be zero.
1461          * This causes the unmap of the skb->data area.  Otherwise
1462          * we pass in the number of frags that mapped successfully
1463          * so they can be umapped.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
1468
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct napi_struct *napi = &rx_ring->napi;
1479
1480         napi->dev = qdev->ndev;
1481
1482         skb = napi_get_frags(napi);
1483         if (!skb) {
1484                 netif_err(qdev, drv, qdev->ndev,
1485                           "Couldn't get an skb, exiting.\n");
1486                 rx_ring->rx_dropped++;
1487                 put_page(lbq_desc->p.pg_chunk.page);
1488                 return;
1489         }
1490         prefetch(lbq_desc->p.pg_chunk.va);
1491         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492                              lbq_desc->p.pg_chunk.page,
1493                              lbq_desc->p.pg_chunk.offset,
1494                              length);
1495
1496         skb->len += length;
1497         skb->data_len += length;
1498         skb->truesize += length;
1499         skb_shinfo(skb)->nr_frags++;
1500
1501         rx_ring->rx_packets++;
1502         rx_ring->rx_bytes += length;
1503         skb->ip_summed = CHECKSUM_UNNECESSARY;
1504         skb_record_rx_queue(skb, rx_ring->cq_id);
1505         if (vlan_id != 0xffff)
1506                 __vlan_hwaccel_put_tag(skb, vlan_id);
1507         napi_gro_frags(napi);
1508 }
1509
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512                                         struct rx_ring *rx_ring,
1513                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1514                                         u32 length,
1515                                         u16 vlan_id)
1516 {
1517         struct net_device *ndev = qdev->ndev;
1518         struct sk_buff *skb = NULL;
1519         void *addr;
1520         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521         struct napi_struct *napi = &rx_ring->napi;
1522
1523         skb = netdev_alloc_skb(ndev, length);
1524         if (!skb) {
1525                 netif_err(qdev, drv, qdev->ndev,
1526                           "Couldn't get an skb, need to unwind!.\n");
1527                 rx_ring->rx_dropped++;
1528                 put_page(lbq_desc->p.pg_chunk.page);
1529                 return;
1530         }
1531
1532         addr = lbq_desc->p.pg_chunk.va;
1533         prefetch(addr);
1534
1535
1536         /* Frame error, so drop the packet. */
1537         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538                 netif_info(qdev, drv, qdev->ndev,
1539                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540                 rx_ring->rx_errors++;
1541                 goto err_out;
1542         }
1543
1544         /* The max framesize filter on this chip is set higher than
1545          * MTU since FCoE uses 2k frames.
1546          */
1547         if (skb->len > ndev->mtu + ETH_HLEN) {
1548                 netif_err(qdev, drv, qdev->ndev,
1549                           "Segment too small, dropping.\n");
1550                 rx_ring->rx_dropped++;
1551                 goto err_out;
1552         }
1553         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556                      length);
1557         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559                                 length-ETH_HLEN);
1560         skb->len += length-ETH_HLEN;
1561         skb->data_len += length-ETH_HLEN;
1562         skb->truesize += length-ETH_HLEN;
1563
1564         rx_ring->rx_packets++;
1565         rx_ring->rx_bytes += skb->len;
1566         skb->protocol = eth_type_trans(skb, ndev);
1567         skb_checksum_none_assert(skb);
1568
1569         if ((ndev->features & NETIF_F_RXCSUM) &&
1570                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571                 /* TCP frame. */
1572                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574                                      "TCP checksum done!\n");
1575                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1576                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578                         /* Unfragmented ipv4 UDP frame. */
1579                         struct iphdr *iph = (struct iphdr *) skb->data;
1580                         if (!(iph->frag_off &
1581                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1584                                              qdev->ndev,
1585                                              "TCP checksum done!\n");
1586                         }
1587                 }
1588         }
1589
1590         skb_record_rx_queue(skb, rx_ring->cq_id);
1591         if (vlan_id != 0xffff)
1592                 __vlan_hwaccel_put_tag(skb, vlan_id);
1593         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594                 napi_gro_receive(napi, skb);
1595         else
1596                 netif_receive_skb(skb);
1597         return;
1598 err_out:
1599         dev_kfree_skb_any(skb);
1600         put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605                                         struct rx_ring *rx_ring,
1606                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1607                                         u32 length,
1608                                         u16 vlan_id)
1609 {
1610         struct net_device *ndev = qdev->ndev;
1611         struct sk_buff *skb = NULL;
1612         struct sk_buff *new_skb = NULL;
1613         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615         skb = sbq_desc->p.skb;
1616         /* Allocate new_skb and copy */
1617         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618         if (new_skb == NULL) {
1619                 netif_err(qdev, probe, qdev->ndev,
1620                           "No skb available, drop the packet.\n");
1621                 rx_ring->rx_dropped++;
1622                 return;
1623         }
1624         skb_reserve(new_skb, NET_IP_ALIGN);
1625         memcpy(skb_put(new_skb, length), skb->data, length);
1626         skb = new_skb;
1627
1628         /* Frame error, so drop the packet. */
1629         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1630                 netif_info(qdev, drv, qdev->ndev,
1631                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1632                 dev_kfree_skb_any(skb);
1633                 rx_ring->rx_errors++;
1634                 return;
1635         }
1636
1637         /* loopback self test for ethtool */
1638         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639                 ql_check_lb_frame(qdev, skb);
1640                 dev_kfree_skb_any(skb);
1641                 return;
1642         }
1643
1644         /* The max framesize filter on this chip is set higher than
1645          * MTU since FCoE uses 2k frames.
1646          */
1647         if (skb->len > ndev->mtu + ETH_HLEN) {
1648                 dev_kfree_skb_any(skb);
1649                 rx_ring->rx_dropped++;
1650                 return;
1651         }
1652
1653         prefetch(skb->data);
1654         skb->dev = ndev;
1655         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657                              "%s Multicast.\n",
1658                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1664         }
1665         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "Promiscuous Packet.\n");
1668
1669         rx_ring->rx_packets++;
1670         rx_ring->rx_bytes += skb->len;
1671         skb->protocol = eth_type_trans(skb, ndev);
1672         skb_checksum_none_assert(skb);
1673
1674         /* If rx checksum is on, and there are no
1675          * csum or frame errors.
1676          */
1677         if ((ndev->features & NETIF_F_RXCSUM) &&
1678                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679                 /* TCP frame. */
1680                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682                                      "TCP checksum done!\n");
1683                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1684                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686                         /* Unfragmented ipv4 UDP frame. */
1687                         struct iphdr *iph = (struct iphdr *) skb->data;
1688                         if (!(iph->frag_off &
1689                                 ntohs(IP_MF|IP_OFFSET))) {
1690                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1692                                              qdev->ndev,
1693                                              "TCP checksum done!\n");
1694                         }
1695                 }
1696         }
1697
1698         skb_record_rx_queue(skb, rx_ring->cq_id);
1699         if (vlan_id != 0xffff)
1700                 __vlan_hwaccel_put_tag(skb, vlan_id);
1701         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702                 napi_gro_receive(&rx_ring->napi, skb);
1703         else
1704                 netif_receive_skb(skb);
1705 }
1706
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1708 {
1709         void *temp_addr = skb->data;
1710
1711         /* Undo the skb_reserve(skb,32) we did before
1712          * giving to hardware, and realign data on
1713          * a 2-byte boundary.
1714          */
1715         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717         skb_copy_to_linear_data(skb, temp_addr,
1718                 (unsigned int)len);
1719 }
1720
1721 /*
1722  * This function builds an skb for the given inbound
1723  * completion.  It will be rewritten for readability in the near
1724  * future, but for not it works well.
1725  */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727                                        struct rx_ring *rx_ring,
1728                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1729 {
1730         struct bq_desc *lbq_desc;
1731         struct bq_desc *sbq_desc;
1732         struct sk_buff *skb = NULL;
1733         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736         /*
1737          * Handle the header buffer if present.
1738          */
1739         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742                              "Header of %d bytes in small buffer.\n", hdr_len);
1743                 /*
1744                  * Headers fit nicely into a small buffer.
1745                  */
1746                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747                 pci_unmap_single(qdev->pdev,
1748                                 dma_unmap_addr(sbq_desc, mapaddr),
1749                                 dma_unmap_len(sbq_desc, maplen),
1750                                 PCI_DMA_FROMDEVICE);
1751                 skb = sbq_desc->p.skb;
1752                 ql_realign_skb(skb, hdr_len);
1753                 skb_put(skb, hdr_len);
1754                 sbq_desc->p.skb = NULL;
1755         }
1756
1757         /*
1758          * Handle the data buffer(s).
1759          */
1760         if (unlikely(!length)) {        /* Is there data too? */
1761                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762                              "No Data buffer in this packet.\n");
1763                 return skb;
1764         }
1765
1766         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769                                      "Headers in small, data of %d bytes in small, combine them.\n",
1770                                      length);
1771                         /*
1772                          * Data is less than small buffer size so it's
1773                          * stuffed in a small buffer.
1774                          * For this case we append the data
1775                          * from the "data" small buffer to the "header" small
1776                          * buffer.
1777                          */
1778                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1779                         pci_dma_sync_single_for_cpu(qdev->pdev,
1780                                                     dma_unmap_addr
1781                                                     (sbq_desc, mapaddr),
1782                                                     dma_unmap_len
1783                                                     (sbq_desc, maplen),
1784                                                     PCI_DMA_FROMDEVICE);
1785                         memcpy(skb_put(skb, length),
1786                                sbq_desc->p.skb->data, length);
1787                         pci_dma_sync_single_for_device(qdev->pdev,
1788                                                        dma_unmap_addr
1789                                                        (sbq_desc,
1790                                                         mapaddr),
1791                                                        dma_unmap_len
1792                                                        (sbq_desc,
1793                                                         maplen),
1794                                                        PCI_DMA_FROMDEVICE);
1795                 } else {
1796                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                                      "%d bytes in a single small buffer.\n",
1798                                      length);
1799                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1800                         skb = sbq_desc->p.skb;
1801                         ql_realign_skb(skb, length);
1802                         skb_put(skb, length);
1803                         pci_unmap_single(qdev->pdev,
1804                                          dma_unmap_addr(sbq_desc,
1805                                                         mapaddr),
1806                                          dma_unmap_len(sbq_desc,
1807                                                        maplen),
1808                                          PCI_DMA_FROMDEVICE);
1809                         sbq_desc->p.skb = NULL;
1810                 }
1811         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814                                      "Header in small, %d bytes in large. Chain large to small!\n",
1815                                      length);
1816                         /*
1817                          * The data is in a single large buffer.  We
1818                          * chain it to the header buffer's skb and let
1819                          * it rip.
1820                          */
1821                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1824                                      lbq_desc->p.pg_chunk.offset, length);
1825                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826                                                 lbq_desc->p.pg_chunk.offset,
1827                                                 length);
1828                         skb->len += length;
1829                         skb->data_len += length;
1830                         skb->truesize += length;
1831                 } else {
1832                         /*
1833                          * The headers and data are in a single large buffer. We
1834                          * copy it to a new skb and let it go. This can happen with
1835                          * jumbo mtu on a non-TCP/UDP frame.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         skb = netdev_alloc_skb(qdev->ndev, length);
1839                         if (skb == NULL) {
1840                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841                                              "No skb available, drop the packet.\n");
1842                                 return NULL;
1843                         }
1844                         pci_unmap_page(qdev->pdev,
1845                                        dma_unmap_addr(lbq_desc,
1846                                                       mapaddr),
1847                                        dma_unmap_len(lbq_desc, maplen),
1848                                        PCI_DMA_FROMDEVICE);
1849                         skb_reserve(skb, NET_IP_ALIGN);
1850                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852                                      length);
1853                         skb_fill_page_desc(skb, 0,
1854                                                 lbq_desc->p.pg_chunk.page,
1855                                                 lbq_desc->p.pg_chunk.offset,
1856                                                 length);
1857                         skb->len += length;
1858                         skb->data_len += length;
1859                         skb->truesize += length;
1860                         length -= length;
1861                         __pskb_pull_tail(skb,
1862                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863                                 VLAN_ETH_HLEN : ETH_HLEN);
1864                 }
1865         } else {
1866                 /*
1867                  * The data is in a chain of large buffers
1868                  * pointed to by a small buffer.  We loop
1869                  * thru and chain them to the our small header
1870                  * buffer's skb.
1871                  * frags:  There are 18 max frags and our small
1872                  *         buffer will hold 32 of them. The thing is,
1873                  *         we'll use 3 max for our 9000 byte jumbo
1874                  *         frames.  If the MTU goes up we could
1875                  *          eventually be in trouble.
1876                  */
1877                 int size, i = 0;
1878                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879                 pci_unmap_single(qdev->pdev,
1880                                  dma_unmap_addr(sbq_desc, mapaddr),
1881                                  dma_unmap_len(sbq_desc, maplen),
1882                                  PCI_DMA_FROMDEVICE);
1883                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884                         /*
1885                          * This is an non TCP/UDP IP frame, so
1886                          * the headers aren't split into a small
1887                          * buffer.  We have to use the small buffer
1888                          * that contains our sg list as our skb to
1889                          * send upstairs. Copy the sg list here to
1890                          * a local buffer and use it to find the
1891                          * pages to chain.
1892                          */
1893                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894                                      "%d bytes of headers & data in chain of large.\n",
1895                                      length);
1896                         skb = sbq_desc->p.skb;
1897                         sbq_desc->p.skb = NULL;
1898                         skb_reserve(skb, NET_IP_ALIGN);
1899                 }
1900                 while (length > 0) {
1901                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902                         size = (length < rx_ring->lbq_buf_size) ? length :
1903                                 rx_ring->lbq_buf_size;
1904
1905                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906                                      "Adding page %d to skb for %d bytes.\n",
1907                                      i, size);
1908                         skb_fill_page_desc(skb, i,
1909                                                 lbq_desc->p.pg_chunk.page,
1910                                                 lbq_desc->p.pg_chunk.offset,
1911                                                 size);
1912                         skb->len += size;
1913                         skb->data_len += size;
1914                         skb->truesize += size;
1915                         length -= size;
1916                         i++;
1917                 }
1918                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919                                 VLAN_ETH_HLEN : ETH_HLEN);
1920         }
1921         return skb;
1922 }
1923
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926                                    struct rx_ring *rx_ring,
1927                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1928                                    u16 vlan_id)
1929 {
1930         struct net_device *ndev = qdev->ndev;
1931         struct sk_buff *skb = NULL;
1932
1933         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936         if (unlikely(!skb)) {
1937                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                              "No skb available, drop packet.\n");
1939                 rx_ring->rx_dropped++;
1940                 return;
1941         }
1942
1943         /* Frame error, so drop the packet. */
1944         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945                 netif_info(qdev, drv, qdev->ndev,
1946                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1947                 dev_kfree_skb_any(skb);
1948                 rx_ring->rx_errors++;
1949                 return;
1950         }
1951
1952         /* The max framesize filter on this chip is set higher than
1953          * MTU since FCoE uses 2k frames.
1954          */
1955         if (skb->len > ndev->mtu + ETH_HLEN) {
1956                 dev_kfree_skb_any(skb);
1957                 rx_ring->rx_dropped++;
1958                 return;
1959         }
1960
1961         /* loopback self test for ethtool */
1962         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963                 ql_check_lb_frame(qdev, skb);
1964                 dev_kfree_skb_any(skb);
1965                 return;
1966         }
1967
1968         prefetch(skb->data);
1969         skb->dev = ndev;
1970         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1971                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1978                 rx_ring->rx_multicast++;
1979         }
1980         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1981                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982                              "Promiscuous Packet.\n");
1983         }
1984
1985         skb->protocol = eth_type_trans(skb, ndev);
1986         skb_checksum_none_assert(skb);
1987
1988         /* If rx checksum is on, and there are no
1989          * csum or frame errors.
1990          */
1991         if ((ndev->features & NETIF_F_RXCSUM) &&
1992                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1993                 /* TCP frame. */
1994                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1995                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996                                      "TCP checksum done!\n");
1997                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1998                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000                 /* Unfragmented ipv4 UDP frame. */
2001                         struct iphdr *iph = (struct iphdr *) skb->data;
2002                         if (!(iph->frag_off &
2003                                 ntohs(IP_MF|IP_OFFSET))) {
2004                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006                                              "TCP checksum done!\n");
2007                         }
2008                 }
2009         }
2010
2011         rx_ring->rx_packets++;
2012         rx_ring->rx_bytes += skb->len;
2013         skb_record_rx_queue(skb, rx_ring->cq_id);
2014         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015                 __vlan_hwaccel_put_tag(skb, vlan_id);
2016         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017                 napi_gro_receive(&rx_ring->napi, skb);
2018         else
2019                 netif_receive_skb(skb);
2020 }
2021
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024                                         struct rx_ring *rx_ring,
2025                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2026 {
2027         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2031
2032         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2033
2034         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035                 /* The data and headers are split into
2036                  * separate buffers.
2037                  */
2038                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2039                                                 vlan_id);
2040         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041                 /* The data fit in a single small buffer.
2042                  * Allocate a new skb, copy the data and
2043                  * return the buffer to the free pool.
2044                  */
2045                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2046                                                 length, vlan_id);
2047         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050                 /* TCP packet in a page chunk that's been checksummed.
2051                  * Tack it on to our GRO skb and let it go.
2052                  */
2053                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2054                                                 length, vlan_id);
2055         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056                 /* Non-TCP packet in a page chunk. Allocate an
2057                  * skb, tack it on frags, and send it up.
2058                  */
2059                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2060                                                 length, vlan_id);
2061         } else {
2062                 /* Non-TCP/UDP large frames that span multiple buffers
2063                  * can be processed corrrectly by the split frame logic.
2064                  */
2065                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066                                                 vlan_id);
2067         }
2068
2069         return (unsigned long)length;
2070 }
2071
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074                                    struct ob_mac_iocb_rsp *mac_rsp)
2075 {
2076         struct tx_ring *tx_ring;
2077         struct tx_ring_desc *tx_ring_desc;
2078
2079         QL_DUMP_OB_MAC_RSP(mac_rsp);
2080         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084         tx_ring->tx_packets++;
2085         dev_kfree_skb(tx_ring_desc->skb);
2086         tx_ring_desc->skb = NULL;
2087
2088         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089                                         OB_MAC_IOCB_RSP_S |
2090                                         OB_MAC_IOCB_RSP_L |
2091                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093                         netif_warn(qdev, tx_done, qdev->ndev,
2094                                    "Total descriptor length did not match transfer length.\n");
2095                 }
2096                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097                         netif_warn(qdev, tx_done, qdev->ndev,
2098                                    "Frame too short to be valid, not sent.\n");
2099                 }
2100                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101                         netif_warn(qdev, tx_done, qdev->ndev,
2102                                    "Frame too long, but sent anyway.\n");
2103                 }
2104                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105                         netif_warn(qdev, tx_done, qdev->ndev,
2106                                    "PCI backplane error. Frame not sent.\n");
2107                 }
2108         }
2109         atomic_inc(&tx_ring->tx_count);
2110 }
2111
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2114 {
2115         ql_link_off(qdev);
2116         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117 }
2118
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2120 {
2121         ql_link_off(qdev);
2122         ql_disable_interrupts(qdev);
2123         /* Clear adapter up bit to signal the recovery
2124          * process that it shouldn't kill the reset worker
2125          * thread
2126          */
2127         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128         /* Set asic recovery bit to indicate reset process that we are
2129          * in fatal error recovery process rather than normal close
2130          */
2131         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2132         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2133 }
2134
2135 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2137 {
2138         switch (ib_ae_rsp->event) {
2139         case MGMT_ERR_EVENT:
2140                 netif_err(qdev, rx_err, qdev->ndev,
2141                           "Management Processor Fatal Error.\n");
2142                 ql_queue_fw_error(qdev);
2143                 return;
2144
2145         case CAM_LOOKUP_ERR_EVENT:
2146                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2148                 ql_queue_asic_error(qdev);
2149                 return;
2150
2151         case SOFT_ECC_ERROR_EVENT:
2152                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2153                 ql_queue_asic_error(qdev);
2154                 break;
2155
2156         case PCI_ERR_ANON_BUF_RD:
2157                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158                                         "anonymous buffers from rx_ring %d.\n",
2159                                         ib_ae_rsp->q_id);
2160                 ql_queue_asic_error(qdev);
2161                 break;
2162
2163         default:
2164                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2165                           ib_ae_rsp->event);
2166                 ql_queue_asic_error(qdev);
2167                 break;
2168         }
2169 }
2170
2171 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2172 {
2173         struct ql_adapter *qdev = rx_ring->qdev;
2174         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2175         struct ob_mac_iocb_rsp *net_rsp = NULL;
2176         int count = 0;
2177
2178         struct tx_ring *tx_ring;
2179         /* While there are entries in the completion queue. */
2180         while (prod != rx_ring->cnsmr_idx) {
2181
2182                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183                              "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2184                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2185
2186                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2187                 rmb();
2188                 switch (net_rsp->opcode) {
2189
2190                 case OPCODE_OB_MAC_TSO_IOCB:
2191                 case OPCODE_OB_MAC_IOCB:
2192                         ql_process_mac_tx_intr(qdev, net_rsp);
2193                         break;
2194                 default:
2195                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197                                      net_rsp->opcode);
2198                 }
2199                 count++;
2200                 ql_update_cq(rx_ring);
2201                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2202         }
2203         if (!net_rsp)
2204                 return 0;
2205         ql_write_cq_idx(rx_ring);
2206         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2207         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2208                 if (atomic_read(&tx_ring->queue_stopped) &&
2209                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2210                         /*
2211                          * The queue got stopped because the tx_ring was full.
2212                          * Wake it up, because it's now at least 25% empty.
2213                          */
2214                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2215         }
2216
2217         return count;
2218 }
2219
2220 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2221 {
2222         struct ql_adapter *qdev = rx_ring->qdev;
2223         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224         struct ql_net_rsp_iocb *net_rsp;
2225         int count = 0;
2226
2227         /* While there are entries in the completion queue. */
2228         while (prod != rx_ring->cnsmr_idx) {
2229
2230                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231                              "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2232                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2233
2234                 net_rsp = rx_ring->curr_entry;
2235                 rmb();
2236                 switch (net_rsp->opcode) {
2237                 case OPCODE_IB_MAC_IOCB:
2238                         ql_process_mac_rx_intr(qdev, rx_ring,
2239                                                (struct ib_mac_iocb_rsp *)
2240                                                net_rsp);
2241                         break;
2242
2243                 case OPCODE_IB_AE_IOCB:
2244                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2245                                                 net_rsp);
2246                         break;
2247                 default:
2248                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250                                      net_rsp->opcode);
2251                         break;
2252                 }
2253                 count++;
2254                 ql_update_cq(rx_ring);
2255                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2256                 if (count == budget)
2257                         break;
2258         }
2259         ql_update_buffer_queues(qdev, rx_ring);
2260         ql_write_cq_idx(rx_ring);
2261         return count;
2262 }
2263
2264 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2265 {
2266         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267         struct ql_adapter *qdev = rx_ring->qdev;
2268         struct rx_ring *trx_ring;
2269         int i, work_done = 0;
2270         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2271
2272         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2274
2275         /* Service the TX rings first.  They start
2276          * right after the RSS rings. */
2277         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278                 trx_ring = &qdev->rx_ring[i];
2279                 /* If this TX completion ring belongs to this vector and
2280                  * it's not empty then service it.
2281                  */
2282                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284                                         trx_ring->cnsmr_idx)) {
2285                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286                                      "%s: Servicing TX completion ring %d.\n",
2287                                      __func__, trx_ring->cq_id);
2288                         ql_clean_outbound_rx_ring(trx_ring);
2289                 }
2290         }
2291
2292         /*
2293          * Now service the RSS ring if it's active.
2294          */
2295         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296                                         rx_ring->cnsmr_idx) {
2297                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298                              "%s: Servicing RX completion ring %d.\n",
2299                              __func__, rx_ring->cq_id);
2300                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2301         }
2302
2303         if (work_done < budget) {
2304                 napi_complete(napi);
2305                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306         }
2307         return work_done;
2308 }
2309
2310 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2311 {
2312         struct ql_adapter *qdev = netdev_priv(ndev);
2313
2314         if (features & NETIF_F_HW_VLAN_RX) {
2315                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316                              "Turning on VLAN in NIC_RCV_CFG.\n");
2317                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2319         } else {
2320                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321                              "Turning off VLAN in NIC_RCV_CFG.\n");
2322                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323         }
2324 }
2325
2326 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2327         netdev_features_t features)
2328 {
2329         /*
2330          * Since there is no support for separate rx/tx vlan accel
2331          * enable/disable make sure tx flag is always in same state as rx.
2332          */
2333         if (features & NETIF_F_HW_VLAN_RX)
2334                 features |= NETIF_F_HW_VLAN_TX;
2335         else
2336                 features &= ~NETIF_F_HW_VLAN_TX;
2337
2338         return features;
2339 }
2340
2341 static int qlge_set_features(struct net_device *ndev,
2342         netdev_features_t features)
2343 {
2344         netdev_features_t changed = ndev->features ^ features;
2345
2346         if (changed & NETIF_F_HW_VLAN_RX)
2347                 qlge_vlan_mode(ndev, features);
2348
2349         return 0;
2350 }
2351
2352 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2353 {
2354         u32 enable_bit = MAC_ADDR_E;
2355
2356         if (ql_set_mac_addr_reg
2357             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2358                 netif_err(qdev, ifup, qdev->ndev,
2359                           "Failed to init vlan address.\n");
2360         }
2361 }
2362
2363 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2364 {
2365         struct ql_adapter *qdev = netdev_priv(ndev);
2366         int status;
2367
2368         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2369         if (status)
2370                 return;
2371
2372         __qlge_vlan_rx_add_vid(qdev, vid);
2373         set_bit(vid, qdev->active_vlans);
2374
2375         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2376 }
2377
2378 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2379 {
2380         u32 enable_bit = 0;
2381
2382         if (ql_set_mac_addr_reg
2383             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2384                 netif_err(qdev, ifup, qdev->ndev,
2385                           "Failed to clear vlan address.\n");
2386         }
2387 }
2388
2389 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2390 {
2391         struct ql_adapter *qdev = netdev_priv(ndev);
2392         int status;
2393
2394         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2395         if (status)
2396                 return;
2397
2398         __qlge_vlan_rx_kill_vid(qdev, vid);
2399         clear_bit(vid, qdev->active_vlans);
2400
2401         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2402 }
2403
2404 static void qlge_restore_vlan(struct ql_adapter *qdev)
2405 {
2406         int status;
2407         u16 vid;
2408
2409         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2410         if (status)
2411                 return;
2412
2413         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2414                 __qlge_vlan_rx_add_vid(qdev, vid);
2415
2416         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2417 }
2418
2419 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2420 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2421 {
2422         struct rx_ring *rx_ring = dev_id;
2423         napi_schedule(&rx_ring->napi);
2424         return IRQ_HANDLED;
2425 }
2426
2427 /* This handles a fatal error, MPI activity, and the default
2428  * rx_ring in an MSI-X multiple vector environment.
2429  * In MSI/Legacy environment it also process the rest of
2430  * the rx_rings.
2431  */
2432 static irqreturn_t qlge_isr(int irq, void *dev_id)
2433 {
2434         struct rx_ring *rx_ring = dev_id;
2435         struct ql_adapter *qdev = rx_ring->qdev;
2436         struct intr_context *intr_context = &qdev->intr_context[0];
2437         u32 var;
2438         int work_done = 0;
2439
2440         spin_lock(&qdev->hw_lock);
2441         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2442                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2443                              "Shared Interrupt, Not ours!\n");
2444                 spin_unlock(&qdev->hw_lock);
2445                 return IRQ_NONE;
2446         }
2447         spin_unlock(&qdev->hw_lock);
2448
2449         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2450
2451         /*
2452          * Check for fatal error.
2453          */
2454         if (var & STS_FE) {
2455                 ql_queue_asic_error(qdev);
2456                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2457                 var = ql_read32(qdev, ERR_STS);
2458                 netdev_err(qdev->ndev, "Resetting chip. "
2459                                         "Error Status Register = 0x%x\n", var);
2460                 return IRQ_HANDLED;
2461         }
2462
2463         /*
2464          * Check MPI processor activity.
2465          */
2466         if ((var & STS_PI) &&
2467                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2468                 /*
2469                  * We've got an async event or mailbox completion.
2470                  * Handle it and clear the source of the interrupt.
2471                  */
2472                 netif_err(qdev, intr, qdev->ndev,
2473                           "Got MPI processor interrupt.\n");
2474                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2475                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2476                 queue_delayed_work_on(smp_processor_id(),
2477                                 qdev->workqueue, &qdev->mpi_work, 0);
2478                 work_done++;
2479         }
2480
2481         /*
2482          * Get the bit-mask that shows the active queues for this
2483          * pass.  Compare it to the queues that this irq services
2484          * and call napi if there's a match.
2485          */
2486         var = ql_read32(qdev, ISR1);
2487         if (var & intr_context->irq_mask) {
2488                 netif_info(qdev, intr, qdev->ndev,
2489                            "Waking handler for rx_ring[0].\n");
2490                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2491                 napi_schedule(&rx_ring->napi);
2492                 work_done++;
2493         }
2494         ql_enable_completion_interrupt(qdev, intr_context->intr);
2495         return work_done ? IRQ_HANDLED : IRQ_NONE;
2496 }
2497
2498 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2499 {
2500
2501         if (skb_is_gso(skb)) {
2502                 int err;
2503                 if (skb_header_cloned(skb)) {
2504                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2505                         if (err)
2506                                 return err;
2507                 }
2508
2509                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2510                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2511                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2512                 mac_iocb_ptr->total_hdrs_len =
2513                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2514                 mac_iocb_ptr->net_trans_offset =
2515                     cpu_to_le16(skb_network_offset(skb) |
2516                                 skb_transport_offset(skb)
2517                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2518                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2519                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2520                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2521                         struct iphdr *iph = ip_hdr(skb);
2522                         iph->check = 0;
2523                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2524                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2525                                                                  iph->daddr, 0,
2526                                                                  IPPROTO_TCP,
2527                                                                  0);
2528                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2529                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2530                         tcp_hdr(skb)->check =
2531                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2532                                              &ipv6_hdr(skb)->daddr,
2533                                              0, IPPROTO_TCP, 0);
2534                 }
2535                 return 1;
2536         }
2537         return 0;
2538 }
2539
2540 static void ql_hw_csum_setup(struct sk_buff *skb,
2541                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2542 {
2543         int len;
2544         struct iphdr *iph = ip_hdr(skb);
2545         __sum16 *check;
2546         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2547         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2548         mac_iocb_ptr->net_trans_offset =
2549                 cpu_to_le16(skb_network_offset(skb) |
2550                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2551
2552         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2553         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2554         if (likely(iph->protocol == IPPROTO_TCP)) {
2555                 check = &(tcp_hdr(skb)->check);
2556                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2557                 mac_iocb_ptr->total_hdrs_len =
2558                     cpu_to_le16(skb_transport_offset(skb) +
2559                                 (tcp_hdr(skb)->doff << 2));
2560         } else {
2561                 check = &(udp_hdr(skb)->check);
2562                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2563                 mac_iocb_ptr->total_hdrs_len =
2564                     cpu_to_le16(skb_transport_offset(skb) +
2565                                 sizeof(struct udphdr));
2566         }
2567         *check = ~csum_tcpudp_magic(iph->saddr,
2568                                     iph->daddr, len, iph->protocol, 0);
2569 }
2570
2571 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2572 {
2573         struct tx_ring_desc *tx_ring_desc;
2574         struct ob_mac_iocb_req *mac_iocb_ptr;
2575         struct ql_adapter *qdev = netdev_priv(ndev);
2576         int tso;
2577         struct tx_ring *tx_ring;
2578         u32 tx_ring_idx = (u32) skb->queue_mapping;
2579
2580         tx_ring = &qdev->tx_ring[tx_ring_idx];
2581
2582         if (skb_padto(skb, ETH_ZLEN))
2583                 return NETDEV_TX_OK;
2584
2585         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2586                 netif_info(qdev, tx_queued, qdev->ndev,
2587                            "%s: shutting down tx queue %d du to lack of resources.\n",
2588                            __func__, tx_ring_idx);
2589                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2590                 atomic_inc(&tx_ring->queue_stopped);
2591                 tx_ring->tx_errors++;
2592                 return NETDEV_TX_BUSY;
2593         }
2594         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2595         mac_iocb_ptr = tx_ring_desc->queue_entry;
2596         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2597
2598         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2599         mac_iocb_ptr->tid = tx_ring_desc->index;
2600         /* We use the upper 32-bits to store the tx queue for this IO.
2601          * When we get the completion we can use it to establish the context.
2602          */
2603         mac_iocb_ptr->txq_idx = tx_ring_idx;
2604         tx_ring_desc->skb = skb;
2605
2606         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2607
2608         if (vlan_tx_tag_present(skb)) {
2609                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2610                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2611                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2612                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2613         }
2614         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2615         if (tso < 0) {
2616                 dev_kfree_skb_any(skb);
2617                 return NETDEV_TX_OK;
2618         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2619                 ql_hw_csum_setup(skb,
2620                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2621         }
2622         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2623                         NETDEV_TX_OK) {
2624                 netif_err(qdev, tx_queued, qdev->ndev,
2625                           "Could not map the segments.\n");
2626                 tx_ring->tx_errors++;
2627                 return NETDEV_TX_BUSY;
2628         }
2629         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2630         tx_ring->prod_idx++;
2631         if (tx_ring->prod_idx == tx_ring->wq_len)
2632                 tx_ring->prod_idx = 0;
2633         wmb();
2634
2635         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2636         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2637                      "tx queued, slot %d, len %d\n",
2638                      tx_ring->prod_idx, skb->len);
2639
2640         atomic_dec(&tx_ring->tx_count);
2641         return NETDEV_TX_OK;
2642 }
2643
2644
2645 static void ql_free_shadow_space(struct ql_adapter *qdev)
2646 {
2647         if (qdev->rx_ring_shadow_reg_area) {
2648                 pci_free_consistent(qdev->pdev,
2649                                     PAGE_SIZE,
2650                                     qdev->rx_ring_shadow_reg_area,
2651                                     qdev->rx_ring_shadow_reg_dma);
2652                 qdev->rx_ring_shadow_reg_area = NULL;
2653         }
2654         if (qdev->tx_ring_shadow_reg_area) {
2655                 pci_free_consistent(qdev->pdev,
2656                                     PAGE_SIZE,
2657                                     qdev->tx_ring_shadow_reg_area,
2658                                     qdev->tx_ring_shadow_reg_dma);
2659                 qdev->tx_ring_shadow_reg_area = NULL;
2660         }
2661 }
2662
2663 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2664 {
2665         qdev->rx_ring_shadow_reg_area =
2666             pci_alloc_consistent(qdev->pdev,
2667                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2668         if (qdev->rx_ring_shadow_reg_area == NULL) {
2669                 netif_err(qdev, ifup, qdev->ndev,
2670                           "Allocation of RX shadow space failed.\n");
2671                 return -ENOMEM;
2672         }
2673         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2674         qdev->tx_ring_shadow_reg_area =
2675             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2676                                  &qdev->tx_ring_shadow_reg_dma);
2677         if (qdev->tx_ring_shadow_reg_area == NULL) {
2678                 netif_err(qdev, ifup, qdev->ndev,
2679                           "Allocation of TX shadow space failed.\n");
2680                 goto err_wqp_sh_area;
2681         }
2682         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2683         return 0;
2684
2685 err_wqp_sh_area:
2686         pci_free_consistent(qdev->pdev,
2687                             PAGE_SIZE,
2688                             qdev->rx_ring_shadow_reg_area,
2689                             qdev->rx_ring_shadow_reg_dma);
2690         return -ENOMEM;
2691 }
2692
2693 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2694 {
2695         struct tx_ring_desc *tx_ring_desc;
2696         int i;
2697         struct ob_mac_iocb_req *mac_iocb_ptr;
2698
2699         mac_iocb_ptr = tx_ring->wq_base;
2700         tx_ring_desc = tx_ring->q;
2701         for (i = 0; i < tx_ring->wq_len; i++) {
2702                 tx_ring_desc->index = i;
2703                 tx_ring_desc->skb = NULL;
2704                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2705                 mac_iocb_ptr++;
2706                 tx_ring_desc++;
2707         }
2708         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2709         atomic_set(&tx_ring->queue_stopped, 0);
2710 }
2711
2712 static void ql_free_tx_resources(struct ql_adapter *qdev,
2713                                  struct tx_ring *tx_ring)
2714 {
2715         if (tx_ring->wq_base) {
2716                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2717                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2718                 tx_ring->wq_base = NULL;
2719         }
2720         kfree(tx_ring->q);
2721         tx_ring->q = NULL;
2722 }
2723
2724 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2725                                  struct tx_ring *tx_ring)
2726 {
2727         tx_ring->wq_base =
2728             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2729                                  &tx_ring->wq_base_dma);
2730
2731         if ((tx_ring->wq_base == NULL) ||
2732             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2733                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2734                 return -ENOMEM;
2735         }
2736         tx_ring->q =
2737             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2738         if (tx_ring->q == NULL)
2739                 goto err;
2740
2741         return 0;
2742 err:
2743         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2744                             tx_ring->wq_base, tx_ring->wq_base_dma);
2745         return -ENOMEM;
2746 }
2747
2748 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2749 {
2750         struct bq_desc *lbq_desc;
2751
2752         uint32_t  curr_idx, clean_idx;
2753
2754         curr_idx = rx_ring->lbq_curr_idx;
2755         clean_idx = rx_ring->lbq_clean_idx;
2756         while (curr_idx != clean_idx) {
2757                 lbq_desc = &rx_ring->lbq[curr_idx];
2758
2759                 if (lbq_desc->p.pg_chunk.last_flag) {
2760                         pci_unmap_page(qdev->pdev,
2761                                 lbq_desc->p.pg_chunk.map,
2762                                 ql_lbq_block_size(qdev),
2763                                        PCI_DMA_FROMDEVICE);
2764                         lbq_desc->p.pg_chunk.last_flag = 0;
2765                 }
2766
2767                 put_page(lbq_desc->p.pg_chunk.page);
2768                 lbq_desc->p.pg_chunk.page = NULL;
2769
2770                 if (++curr_idx == rx_ring->lbq_len)
2771                         curr_idx = 0;
2772
2773         }
2774 }
2775
2776 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2777 {
2778         int i;
2779         struct bq_desc *sbq_desc;
2780
2781         for (i = 0; i < rx_ring->sbq_len; i++) {
2782                 sbq_desc = &rx_ring->sbq[i];
2783                 if (sbq_desc == NULL) {
2784                         netif_err(qdev, ifup, qdev->ndev,
2785                                   "sbq_desc %d is NULL.\n", i);
2786                         return;
2787                 }
2788                 if (sbq_desc->p.skb) {
2789                         pci_unmap_single(qdev->pdev,
2790                                          dma_unmap_addr(sbq_desc, mapaddr),
2791                                          dma_unmap_len(sbq_desc, maplen),
2792                                          PCI_DMA_FROMDEVICE);
2793                         dev_kfree_skb(sbq_desc->p.skb);
2794                         sbq_desc->p.skb = NULL;
2795                 }
2796         }
2797 }
2798
2799 /* Free all large and small rx buffers associated
2800  * with the completion queues for this device.
2801  */
2802 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2803 {
2804         int i;
2805         struct rx_ring *rx_ring;
2806
2807         for (i = 0; i < qdev->rx_ring_count; i++) {
2808                 rx_ring = &qdev->rx_ring[i];
2809                 if (rx_ring->lbq)
2810                         ql_free_lbq_buffers(qdev, rx_ring);
2811                 if (rx_ring->sbq)
2812                         ql_free_sbq_buffers(qdev, rx_ring);
2813         }
2814 }
2815
2816 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2817 {
2818         struct rx_ring *rx_ring;
2819         int i;
2820
2821         for (i = 0; i < qdev->rx_ring_count; i++) {
2822                 rx_ring = &qdev->rx_ring[i];
2823                 if (rx_ring->type != TX_Q)
2824                         ql_update_buffer_queues(qdev, rx_ring);
2825         }
2826 }
2827
2828 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2829                                 struct rx_ring *rx_ring)
2830 {
2831         int i;
2832         struct bq_desc *lbq_desc;
2833         __le64 *bq = rx_ring->lbq_base;
2834
2835         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2836         for (i = 0; i < rx_ring->lbq_len; i++) {
2837                 lbq_desc = &rx_ring->lbq[i];
2838                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2839                 lbq_desc->index = i;
2840                 lbq_desc->addr = bq;
2841                 bq++;
2842         }
2843 }
2844
2845 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2846                                 struct rx_ring *rx_ring)
2847 {
2848         int i;
2849         struct bq_desc *sbq_desc;
2850         __le64 *bq = rx_ring->sbq_base;
2851
2852         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2853         for (i = 0; i < rx_ring->sbq_len; i++) {
2854                 sbq_desc = &rx_ring->sbq[i];
2855                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2856                 sbq_desc->index = i;
2857                 sbq_desc->addr = bq;
2858                 bq++;
2859         }
2860 }
2861
2862 static void ql_free_rx_resources(struct ql_adapter *qdev,
2863                                  struct rx_ring *rx_ring)
2864 {
2865         /* Free the small buffer queue. */
2866         if (rx_ring->sbq_base) {
2867                 pci_free_consistent(qdev->pdev,
2868                                     rx_ring->sbq_size,
2869                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2870                 rx_ring->sbq_base = NULL;
2871         }
2872
2873         /* Free the small buffer queue control blocks. */
2874         kfree(rx_ring->sbq);
2875         rx_ring->sbq = NULL;
2876
2877         /* Free the large buffer queue. */
2878         if (rx_ring->lbq_base) {
2879                 pci_free_consistent(qdev->pdev,
2880                                     rx_ring->lbq_size,
2881                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2882                 rx_ring->lbq_base = NULL;
2883         }
2884
2885         /* Free the large buffer queue control blocks. */
2886         kfree(rx_ring->lbq);
2887         rx_ring->lbq = NULL;
2888
2889         /* Free the rx queue. */
2890         if (rx_ring->cq_base) {
2891                 pci_free_consistent(qdev->pdev,
2892                                     rx_ring->cq_size,
2893                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2894                 rx_ring->cq_base = NULL;
2895         }
2896 }
2897
2898 /* Allocate queues and buffers for this completions queue based
2899  * on the values in the parameter structure. */
2900 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2901                                  struct rx_ring *rx_ring)
2902 {
2903
2904         /*
2905          * Allocate the completion queue for this rx_ring.
2906          */
2907         rx_ring->cq_base =
2908             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2909                                  &rx_ring->cq_base_dma);
2910
2911         if (rx_ring->cq_base == NULL) {
2912                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2913                 return -ENOMEM;
2914         }
2915
2916         if (rx_ring->sbq_len) {
2917                 /*
2918                  * Allocate small buffer queue.
2919                  */
2920                 rx_ring->sbq_base =
2921                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2922                                          &rx_ring->sbq_base_dma);
2923
2924                 if (rx_ring->sbq_base == NULL) {
2925                         netif_err(qdev, ifup, qdev->ndev,
2926                                   "Small buffer queue allocation failed.\n");
2927                         goto err_mem;
2928                 }
2929
2930                 /*
2931                  * Allocate small buffer queue control blocks.
2932                  */
2933                 rx_ring->sbq =
2934                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2935                             GFP_KERNEL);
2936                 if (rx_ring->sbq == NULL) {
2937                         netif_err(qdev, ifup, qdev->ndev,
2938                                   "Small buffer queue control block allocation failed.\n");
2939                         goto err_mem;
2940                 }
2941
2942                 ql_init_sbq_ring(qdev, rx_ring);
2943         }
2944
2945         if (rx_ring->lbq_len) {
2946                 /*
2947                  * Allocate large buffer queue.
2948                  */
2949                 rx_ring->lbq_base =
2950                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2951                                          &rx_ring->lbq_base_dma);
2952
2953                 if (rx_ring->lbq_base == NULL) {
2954                         netif_err(qdev, ifup, qdev->ndev,
2955                                   "Large buffer queue allocation failed.\n");
2956                         goto err_mem;
2957                 }
2958                 /*
2959                  * Allocate large buffer queue control blocks.
2960                  */
2961                 rx_ring->lbq =
2962                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2963                             GFP_KERNEL);
2964                 if (rx_ring->lbq == NULL) {
2965                         netif_err(qdev, ifup, qdev->ndev,
2966                                   "Large buffer queue control block allocation failed.\n");
2967                         goto err_mem;
2968                 }
2969
2970                 ql_init_lbq_ring(qdev, rx_ring);
2971         }
2972
2973         return 0;
2974
2975 err_mem:
2976         ql_free_rx_resources(qdev, rx_ring);
2977         return -ENOMEM;
2978 }
2979
2980 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2981 {
2982         struct tx_ring *tx_ring;
2983         struct tx_ring_desc *tx_ring_desc;
2984         int i, j;
2985
2986         /*
2987          * Loop through all queues and free
2988          * any resources.
2989          */
2990         for (j = 0; j < qdev->tx_ring_count; j++) {
2991                 tx_ring = &qdev->tx_ring[j];
2992                 for (i = 0; i < tx_ring->wq_len; i++) {
2993                         tx_ring_desc = &tx_ring->q[i];
2994                         if (tx_ring_desc && tx_ring_desc->skb) {
2995                                 netif_err(qdev, ifdown, qdev->ndev,
2996                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2997                                           tx_ring_desc->skb, j,
2998                                           tx_ring_desc->index);
2999                                 ql_unmap_send(qdev, tx_ring_desc,
3000                                               tx_ring_desc->map_cnt);
3001                                 dev_kfree_skb(tx_ring_desc->skb);
3002                                 tx_ring_desc->skb = NULL;
3003                         }
3004                 }
3005         }
3006 }
3007
3008 static void ql_free_mem_resources(struct ql_adapter *qdev)
3009 {
3010         int i;
3011
3012         for (i = 0; i < qdev->tx_ring_count; i++)
3013                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3014         for (i = 0; i < qdev->rx_ring_count; i++)
3015                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3016         ql_free_shadow_space(qdev);
3017 }
3018
3019 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3020 {
3021         int i;
3022
3023         /* Allocate space for our shadow registers and such. */
3024         if (ql_alloc_shadow_space(qdev))
3025                 return -ENOMEM;
3026
3027         for (i = 0; i < qdev->rx_ring_count; i++) {
3028                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3029                         netif_err(qdev, ifup, qdev->ndev,
3030                                   "RX resource allocation failed.\n");
3031                         goto err_mem;
3032                 }
3033         }
3034         /* Allocate tx queue resources */
3035         for (i = 0; i < qdev->tx_ring_count; i++) {
3036                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3037                         netif_err(qdev, ifup, qdev->ndev,
3038                                   "TX resource allocation failed.\n");
3039                         goto err_mem;
3040                 }
3041         }
3042         return 0;
3043
3044 err_mem:
3045         ql_free_mem_resources(qdev);
3046         return -ENOMEM;
3047 }
3048
3049 /* Set up the rx ring control block and pass it to the chip.
3050  * The control block is defined as
3051  * "Completion Queue Initialization Control Block", or cqicb.
3052  */
3053 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3054 {
3055         struct cqicb *cqicb = &rx_ring->cqicb;
3056         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3057                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3058         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3059                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3060         void __iomem *doorbell_area =
3061             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3062         int err = 0;
3063         u16 bq_len;
3064         u64 tmp;
3065         __le64 *base_indirect_ptr;
3066         int page_entries;
3067
3068         /* Set up the shadow registers for this ring. */
3069         rx_ring->prod_idx_sh_reg = shadow_reg;
3070         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3071         *rx_ring->prod_idx_sh_reg = 0;
3072         shadow_reg += sizeof(u64);
3073         shadow_reg_dma += sizeof(u64);
3074         rx_ring->lbq_base_indirect = shadow_reg;
3075         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3076         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3077         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3078         rx_ring->sbq_base_indirect = shadow_reg;
3079         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3080
3081         /* PCI doorbell mem area + 0x00 for consumer index register */
3082         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3083         rx_ring->cnsmr_idx = 0;
3084         rx_ring->curr_entry = rx_ring->cq_base;
3085
3086         /* PCI doorbell mem area + 0x04 for valid register */
3087         rx_ring->valid_db_reg = doorbell_area + 0x04;
3088
3089         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3090         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3091
3092         /* PCI doorbell mem area + 0x1c */
3093         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3094
3095         memset((void *)cqicb, 0, sizeof(struct cqicb));
3096         cqicb->msix_vect = rx_ring->irq;
3097
3098         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3099         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3100
3101         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3102
3103         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3104
3105         /*
3106          * Set up the control block load flags.
3107          */
3108         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3109             FLAGS_LV |          /* Load MSI-X vector */
3110             FLAGS_LI;           /* Load irq delay values */
3111         if (rx_ring->lbq_len) {
3112                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3113                 tmp = (u64)rx_ring->lbq_base_dma;
3114                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3115                 page_entries = 0;
3116                 do {
3117                         *base_indirect_ptr = cpu_to_le64(tmp);
3118                         tmp += DB_PAGE_SIZE;
3119                         base_indirect_ptr++;
3120                         page_entries++;
3121                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3122                 cqicb->lbq_addr =
3123                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3124                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3125                         (u16) rx_ring->lbq_buf_size;
3126                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3127                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3128                         (u16) rx_ring->lbq_len;
3129                 cqicb->lbq_len = cpu_to_le16(bq_len);
3130                 rx_ring->lbq_prod_idx = 0;
3131                 rx_ring->lbq_curr_idx = 0;
3132                 rx_ring->lbq_clean_idx = 0;
3133                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3134         }
3135         if (rx_ring->sbq_len) {
3136                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3137                 tmp = (u64)rx_ring->sbq_base_dma;
3138                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3139                 page_entries = 0;
3140                 do {
3141                         *base_indirect_ptr = cpu_to_le64(tmp);
3142                         tmp += DB_PAGE_SIZE;
3143                         base_indirect_ptr++;
3144                         page_entries++;
3145                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3146                 cqicb->sbq_addr =
3147                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3148                 cqicb->sbq_buf_size =
3149                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3150                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3151                         (u16) rx_ring->sbq_len;
3152                 cqicb->sbq_len = cpu_to_le16(bq_len);
3153                 rx_ring->sbq_prod_idx = 0;
3154                 rx_ring->sbq_curr_idx = 0;
3155                 rx_ring->sbq_clean_idx = 0;
3156                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3157         }
3158         switch (rx_ring->type) {
3159         case TX_Q:
3160                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3161                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3162                 break;
3163         case RX_Q:
3164                 /* Inbound completion handling rx_rings run in
3165                  * separate NAPI contexts.
3166                  */
3167                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3168                                64);
3169                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3170                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3171                 break;
3172         default:
3173                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3175         }
3176         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3177                      "Initializing rx work queue.\n");
3178         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3179                            CFG_LCQ, rx_ring->cq_id);
3180         if (err) {
3181                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3182                 return err;
3183         }
3184         return err;
3185 }
3186
3187 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3188 {
3189         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3190         void __iomem *doorbell_area =
3191             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3192         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3193             (tx_ring->wq_id * sizeof(u64));
3194         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3195             (tx_ring->wq_id * sizeof(u64));
3196         int err = 0;
3197
3198         /*
3199          * Assign doorbell registers for this tx_ring.
3200          */
3201         /* TX PCI doorbell mem area for tx producer index */
3202         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3203         tx_ring->prod_idx = 0;
3204         /* TX PCI doorbell mem area + 0x04 */
3205         tx_ring->valid_db_reg = doorbell_area + 0x04;
3206
3207         /*
3208          * Assign shadow registers for this tx_ring.
3209          */
3210         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3211         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3212
3213         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3214         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3215                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3216         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3217         wqicb->rid = 0;
3218         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3219
3220         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3221
3222         ql_init_tx_ring(qdev, tx_ring);
3223
3224         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3225                            (u16) tx_ring->wq_id);
3226         if (err) {
3227                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3228                 return err;
3229         }
3230         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3231                      "Successfully loaded WQICB.\n");
3232         return err;
3233 }
3234
3235 static void ql_disable_msix(struct ql_adapter *qdev)
3236 {
3237         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3238                 pci_disable_msix(qdev->pdev);
3239                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3240                 kfree(qdev->msi_x_entry);
3241                 qdev->msi_x_entry = NULL;
3242         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3243                 pci_disable_msi(qdev->pdev);
3244                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3245         }
3246 }
3247
3248 /* We start by trying to get the number of vectors
3249  * stored in qdev->intr_count. If we don't get that
3250  * many then we reduce the count and try again.
3251  */
3252 static void ql_enable_msix(struct ql_adapter *qdev)
3253 {
3254         int i, err;
3255
3256         /* Get the MSIX vectors. */
3257         if (qlge_irq_type == MSIX_IRQ) {
3258                 /* Try to alloc space for the msix struct,
3259                  * if it fails then go to MSI/legacy.
3260                  */
3261                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3262                                             sizeof(struct msix_entry),
3263                                             GFP_KERNEL);
3264                 if (!qdev->msi_x_entry) {
3265                         qlge_irq_type = MSI_IRQ;
3266                         goto msi;
3267                 }
3268
3269                 for (i = 0; i < qdev->intr_count; i++)
3270                         qdev->msi_x_entry[i].entry = i;
3271
3272                 /* Loop to get our vectors.  We start with
3273                  * what we want and settle for what we get.
3274                  */
3275                 do {
3276                         err = pci_enable_msix(qdev->pdev,
3277                                 qdev->msi_x_entry, qdev->intr_count);
3278                         if (err > 0)
3279                                 qdev->intr_count = err;
3280                 } while (err > 0);
3281
3282                 if (err < 0) {
3283                         kfree(qdev->msi_x_entry);
3284                         qdev->msi_x_entry = NULL;
3285                         netif_warn(qdev, ifup, qdev->ndev,
3286                                    "MSI-X Enable failed, trying MSI.\n");
3287                         qdev->intr_count = 1;
3288                         qlge_irq_type = MSI_IRQ;
3289                 } else if (err == 0) {
3290                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3291                         netif_info(qdev, ifup, qdev->ndev,
3292                                    "MSI-X Enabled, got %d vectors.\n",
3293                                    qdev->intr_count);
3294                         return;
3295                 }
3296         }
3297 msi:
3298         qdev->intr_count = 1;
3299         if (qlge_irq_type == MSI_IRQ) {
3300                 if (!pci_enable_msi(qdev->pdev)) {
3301                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3302                         netif_info(qdev, ifup, qdev->ndev,
3303                                    "Running with MSI interrupts.\n");
3304                         return;
3305                 }
3306         }
3307         qlge_irq_type = LEG_IRQ;
3308         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3309                      "Running with legacy interrupts.\n");
3310 }
3311
3312 /* Each vector services 1 RSS ring and and 1 or more
3313  * TX completion rings.  This function loops through
3314  * the TX completion rings and assigns the vector that
3315  * will service it.  An example would be if there are
3316  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3317  * This would mean that vector 0 would service RSS ring 0
3318  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3319  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3320  */
3321 static void ql_set_tx_vect(struct ql_adapter *qdev)
3322 {
3323         int i, j, vect;
3324         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3325
3326         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327                 /* Assign irq vectors to TX rx_rings.*/
3328                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3329                                          i < qdev->rx_ring_count; i++) {
3330                         if (j == tx_rings_per_vector) {
3331                                 vect++;
3332                                 j = 0;
3333                         }
3334                         qdev->rx_ring[i].irq = vect;
3335                         j++;
3336                 }
3337         } else {
3338                 /* For single vector all rings have an irq
3339                  * of zero.
3340                  */
3341                 for (i = 0; i < qdev->rx_ring_count; i++)
3342                         qdev->rx_ring[i].irq = 0;
3343         }
3344 }
3345
3346 /* Set the interrupt mask for this vector.  Each vector
3347  * will service 1 RSS ring and 1 or more TX completion
3348  * rings.  This function sets up a bit mask per vector
3349  * that indicates which rings it services.
3350  */
3351 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3352 {
3353         int j, vect = ctx->intr;
3354         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3355
3356         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3357                 /* Add the RSS ring serviced by this vector
3358                  * to the mask.
3359                  */
3360                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3361                 /* Add the TX ring(s) serviced by this vector
3362                  * to the mask. */
3363                 for (j = 0; j < tx_rings_per_vector; j++) {
3364                         ctx->irq_mask |=
3365                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3366                         (vect * tx_rings_per_vector) + j].cq_id);
3367                 }
3368         } else {
3369                 /* For single vector we just shift each queue's
3370                  * ID into the mask.
3371                  */
3372                 for (j = 0; j < qdev->rx_ring_count; j++)
3373                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3374         }
3375 }
3376
3377 /*
3378  * Here we build the intr_context structures based on
3379  * our rx_ring count and intr vector count.
3380  * The intr_context structure is used to hook each vector
3381  * to possibly different handlers.
3382  */
3383 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3384 {
3385         int i = 0;
3386         struct intr_context *intr_context = &qdev->intr_context[0];
3387
3388         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3389                 /* Each rx_ring has it's
3390                  * own intr_context since we have separate
3391                  * vectors for each queue.
3392                  */
3393                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3394                         qdev->rx_ring[i].irq = i;
3395                         intr_context->intr = i;
3396                         intr_context->qdev = qdev;
3397                         /* Set up this vector's bit-mask that indicates
3398                          * which queues it services.
3399                          */
3400                         ql_set_irq_mask(qdev, intr_context);
3401                         /*
3402                          * We set up each vectors enable/disable/read bits so
3403                          * there's no bit/mask calculations in the critical path.
3404                          */
3405                         intr_context->intr_en_mask =
3406                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3407                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3408                             | i;
3409                         intr_context->intr_dis_mask =
3410                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3412                             INTR_EN_IHD | i;
3413                         intr_context->intr_read_mask =
3414                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3415                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3416                             i;
3417                         if (i == 0) {
3418                                 /* The first vector/queue handles
3419                                  * broadcast/multicast, fatal errors,
3420                                  * and firmware events.  This in addition
3421                                  * to normal inbound NAPI processing.
3422                                  */
3423                                 intr_context->handler = qlge_isr;
3424                                 sprintf(intr_context->name, "%s-rx-%d",
3425                                         qdev->ndev->name, i);
3426                         } else {
3427                                 /*
3428                                  * Inbound queues handle unicast frames only.
3429                                  */
3430                                 intr_context->handler = qlge_msix_rx_isr;
3431                                 sprintf(intr_context->name, "%s-rx-%d",
3432                                         qdev->ndev->name, i);
3433                         }
3434                 }
3435         } else {
3436                 /*
3437                  * All rx_rings use the same intr_context since
3438                  * there is only one vector.
3439                  */
3440                 intr_context->intr = 0;
3441                 intr_context->qdev = qdev;
3442                 /*
3443                  * We set up each vectors enable/disable/read bits so
3444                  * there's no bit/mask calculations in the critical path.
3445                  */
3446                 intr_context->intr_en_mask =
3447                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3448                 intr_context->intr_dis_mask =
3449                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3450                     INTR_EN_TYPE_DISABLE;
3451                 intr_context->intr_read_mask =
3452                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3453                 /*
3454                  * Single interrupt means one handler for all rings.
3455                  */
3456                 intr_context->handler = qlge_isr;
3457                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3458                 /* Set up this vector's bit-mask that indicates
3459                  * which queues it services. In this case there is
3460                  * a single vector so it will service all RSS and
3461                  * TX completion rings.
3462                  */
3463                 ql_set_irq_mask(qdev, intr_context);
3464         }
3465         /* Tell the TX completion rings which MSIx vector
3466          * they will be using.
3467          */
3468         ql_set_tx_vect(qdev);
3469 }
3470
3471 static void ql_free_irq(struct ql_adapter *qdev)
3472 {
3473         int i;
3474         struct intr_context *intr_context = &qdev->intr_context[0];
3475
3476         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3477                 if (intr_context->hooked) {
3478                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3479                                 free_irq(qdev->msi_x_entry[i].vector,
3480                                          &qdev->rx_ring[i]);
3481                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3482                                              "freeing msix interrupt %d.\n", i);
3483                         } else {
3484                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3485                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3486                                              "freeing msi interrupt %d.\n", i);
3487                         }
3488                 }
3489         }
3490         ql_disable_msix(qdev);
3491 }
3492
3493 static int ql_request_irq(struct ql_adapter *qdev)
3494 {
3495         int i;
3496         int status = 0;
3497         struct pci_dev *pdev = qdev->pdev;
3498         struct intr_context *intr_context = &qdev->intr_context[0];
3499
3500         ql_resolve_queues_to_irqs(qdev);
3501
3502         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3503                 atomic_set(&intr_context->irq_cnt, 0);
3504                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3505                         status = request_irq(qdev->msi_x_entry[i].vector,
3506                                              intr_context->handler,
3507                                              0,
3508                                              intr_context->name,
3509                                              &qdev->rx_ring[i]);
3510                         if (status) {
3511                                 netif_err(qdev, ifup, qdev->ndev,
3512                                           "Failed request for MSIX interrupt %d.\n",
3513                                           i);
3514                                 goto err_irq;
3515                         } else {
3516                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517                                              "Hooked intr %d, queue type %s, with name %s.\n",
3518                                              i,
3519                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3520                                              "DEFAULT_Q" :
3521                                              qdev->rx_ring[i].type == TX_Q ?
3522                                              "TX_Q" :
3523                                              qdev->rx_ring[i].type == RX_Q ?
3524                                              "RX_Q" : "",
3525                                              intr_context->name);
3526                         }
3527                 } else {
3528                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3529                                      "trying msi or legacy interrupts.\n");
3530                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3531                                      "%s: irq = %d.\n", __func__, pdev->irq);
3532                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3533                                      "%s: context->name = %s.\n", __func__,
3534                                      intr_context->name);
3535                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3536                                      "%s: dev_id = 0x%p.\n", __func__,
3537                                      &qdev->rx_ring[0]);
3538                         status =
3539                             request_irq(pdev->irq, qlge_isr,
3540                                         test_bit(QL_MSI_ENABLED,
3541                                                  &qdev->
3542                                                  flags) ? 0 : IRQF_SHARED,
3543                                         intr_context->name, &qdev->rx_ring[0]);
3544                         if (status)
3545                                 goto err_irq;
3546
3547                         netif_err(qdev, ifup, qdev->ndev,
3548                                   "Hooked intr %d, queue type %s, with name %s.\n",
3549                                   i,
3550                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3551                                   "DEFAULT_Q" :
3552                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3553                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3554                                   intr_context->name);
3555                 }
3556                 intr_context->hooked = 1;
3557         }
3558         return status;
3559 err_irq:
3560         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
3561         ql_free_irq(qdev);
3562         return status;
3563 }
3564
3565 static int ql_start_rss(struct ql_adapter *qdev)
3566 {
3567         static const u8 init_hash_seed[] = {
3568                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3569                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3570                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3571                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3572                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3573         };
3574         struct ricb *ricb = &qdev->ricb;
3575         int status = 0;
3576         int i;
3577         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3578
3579         memset((void *)ricb, 0, sizeof(*ricb));
3580
3581         ricb->base_cq = RSS_L4K;
3582         ricb->flags =
3583                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3584         ricb->mask = cpu_to_le16((u16)(0x3ff));
3585
3586         /*
3587          * Fill out the Indirection Table.
3588          */
3589         for (i = 0; i < 1024; i++)
3590                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3591
3592         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3593         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3594
3595         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3596
3597         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3598         if (status) {
3599                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3600                 return status;
3601         }
3602         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3603                      "Successfully loaded RICB.\n");
3604         return status;
3605 }
3606
3607 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3608 {
3609         int i, status = 0;
3610
3611         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3612         if (status)
3613                 return status;
3614         /* Clear all the entries in the routing table. */
3615         for (i = 0; i < 16; i++) {
3616                 status = ql_set_routing_reg(qdev, i, 0, 0);
3617                 if (status) {
3618                         netif_err(qdev, ifup, qdev->ndev,
3619                                   "Failed to init routing register for CAM packets.\n");
3620                         break;
3621                 }
3622         }
3623         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3624         return status;
3625 }
3626
3627 /* Initialize the frame-to-queue routing. */
3628 static int ql_route_initialize(struct ql_adapter *qdev)
3629 {
3630         int status = 0;
3631
3632         /* Clear all the entries in the routing table. */
3633         status = ql_clear_routing_entries(qdev);
3634         if (status)
3635                 return status;
3636
3637         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3638         if (status)
3639                 return status;
3640
3641         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3642                                                 RT_IDX_IP_CSUM_ERR, 1);
3643         if (status) {
3644                 netif_err(qdev, ifup, qdev->ndev,
3645                         "Failed to init routing register "
3646                         "for IP CSUM error packets.\n");
3647                 goto exit;
3648         }
3649         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3650                                                 RT_IDX_TU_CSUM_ERR, 1);
3651         if (status) {
3652                 netif_err(qdev, ifup, qdev->ndev,
3653                         "Failed to init routing register "
3654                         "for TCP/UDP CSUM error packets.\n");
3655                 goto exit;
3656         }
3657         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3658         if (status) {
3659                 netif_err(qdev, ifup, qdev->ndev,
3660                           "Failed to init routing register for broadcast packets.\n");
3661                 goto exit;
3662         }
3663         /* If we have more than one inbound queue, then turn on RSS in the
3664          * routing block.
3665          */
3666         if (qdev->rss_ring_count > 1) {
3667                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3668                                         RT_IDX_RSS_MATCH, 1);
3669                 if (status) {
3670                         netif_err(qdev, ifup, qdev->ndev,
3671                                   "Failed to init routing register for MATCH RSS packets.\n");
3672                         goto exit;
3673                 }
3674         }
3675
3676         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3677                                     RT_IDX_CAM_HIT, 1);
3678         if (status)
3679                 netif_err(qdev, ifup, qdev->ndev,
3680                           "Failed to init routing register for CAM packets.\n");
3681 exit:
3682         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3683         return status;
3684 }
3685
3686 int ql_cam_route_initialize(struct ql_adapter *qdev)
3687 {
3688         int status, set;
3689
3690         /* If check if the link is up and use to
3691          * determine if we are setting or clearing
3692          * the MAC address in the CAM.
3693          */
3694         set = ql_read32(qdev, STS);
3695         set &= qdev->port_link_up;
3696         status = ql_set_mac_addr(qdev, set);
3697         if (status) {
3698                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3699                 return status;
3700         }
3701
3702         status = ql_route_initialize(qdev);
3703         if (status)
3704                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3705
3706         return status;
3707 }
3708
3709 static int ql_adapter_initialize(struct ql_adapter *qdev)
3710 {
3711         u32 value, mask;
3712         int i;
3713         int status = 0;
3714
3715         /*
3716          * Set up the System register to halt on errors.
3717          */
3718         value = SYS_EFE | SYS_FAE;
3719         mask = value << 16;
3720         ql_write32(qdev, SYS, mask | value);
3721
3722         /* Set the default queue, and VLAN behavior. */
3723         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3724         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3725         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3726
3727         /* Set the MPI interrupt to enabled. */
3728         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3729
3730         /* Enable the function, set pagesize, enable error checking. */
3731         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3732             FSC_EC | FSC_VM_PAGE_4K;
3733         value |= SPLT_SETTING;
3734
3735         /* Set/clear header splitting. */
3736         mask = FSC_VM_PAGESIZE_MASK |
3737             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3738         ql_write32(qdev, FSC, mask | value);
3739
3740         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3741
3742         /* Set RX packet routing to use port/pci function on which the
3743          * packet arrived on in addition to usual frame routing.
3744          * This is helpful on bonding where both interfaces can have
3745          * the same MAC address.
3746          */
3747         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3748         /* Reroute all packets to our Interface.
3749          * They may have been routed to MPI firmware
3750          * due to WOL.
3751          */
3752         value = ql_read32(qdev, MGMT_RCV_CFG);
3753         value &= ~MGMT_RCV_CFG_RM;
3754         mask = 0xffff0000;
3755
3756         /* Sticky reg needs clearing due to WOL. */
3757         ql_write32(qdev, MGMT_RCV_CFG, mask);
3758         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3759
3760         /* Default WOL is enable on Mezz cards */
3761         if (qdev->pdev->subsystem_device == 0x0068 ||
3762                         qdev->pdev->subsystem_device == 0x0180)
3763                 qdev->wol = WAKE_MAGIC;
3764
3765         /* Start up the rx queues. */
3766         for (i = 0; i < qdev->rx_ring_count; i++) {
3767                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3768                 if (status) {
3769                         netif_err(qdev, ifup, qdev->ndev,
3770                                   "Failed to start rx ring[%d].\n", i);
3771                         return status;
3772                 }
3773         }
3774
3775         /* If there is more than one inbound completion queue
3776          * then download a RICB to configure RSS.
3777          */
3778         if (qdev->rss_ring_count > 1) {
3779                 status = ql_start_rss(qdev);
3780                 if (status) {
3781                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3782                         return status;
3783                 }
3784         }
3785
3786         /* Start up the tx queues. */
3787         for (i = 0; i < qdev->tx_ring_count; i++) {
3788                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3789                 if (status) {
3790                         netif_err(qdev, ifup, qdev->ndev,
3791                                   "Failed to start tx ring[%d].\n", i);
3792                         return status;
3793                 }
3794         }
3795
3796         /* Initialize the port and set the max framesize. */
3797         status = qdev->nic_ops->port_initialize(qdev);
3798         if (status)
3799                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3800
3801         /* Set up the MAC address and frame routing filter. */
3802         status = ql_cam_route_initialize(qdev);
3803         if (status) {
3804                 netif_err(qdev, ifup, qdev->ndev,
3805                           "Failed to init CAM/Routing tables.\n");
3806                 return status;
3807         }
3808
3809         /* Start NAPI for the RSS queues. */
3810         for (i = 0; i < qdev->rss_ring_count; i++) {
3811                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3812                              "Enabling NAPI for rx_ring[%d].\n", i);
3813                 napi_enable(&qdev->rx_ring[i].napi);
3814         }
3815
3816         return status;
3817 }
3818
3819 /* Issue soft reset to chip. */
3820 static int ql_adapter_reset(struct ql_adapter *qdev)
3821 {
3822         u32 value;
3823         int status = 0;
3824         unsigned long end_jiffies;
3825
3826         /* Clear all the entries in the routing table. */
3827         status = ql_clear_routing_entries(qdev);
3828         if (status) {
3829                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3830                 return status;
3831         }
3832
3833         end_jiffies = jiffies +
3834                 max((unsigned long)1, usecs_to_jiffies(30));
3835
3836         /* Check if bit is set then skip the mailbox command and
3837          * clear the bit, else we are in normal reset process.
3838          */
3839         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3840                 /* Stop management traffic. */
3841                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3842
3843                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3844                 ql_wait_fifo_empty(qdev);
3845         } else
3846                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3847
3848         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3849
3850         do {
3851                 value = ql_read32(qdev, RST_FO);
3852                 if ((value & RST_FO_FR) == 0)
3853                         break;
3854                 cpu_relax();
3855         } while (time_before(jiffies, end_jiffies));
3856
3857         if (value & RST_FO_FR) {
3858                 netif_err(qdev, ifdown, qdev->ndev,
3859                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3860                 status = -ETIMEDOUT;
3861         }
3862
3863         /* Resume management traffic. */
3864         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3865         return status;
3866 }
3867
3868 static void ql_display_dev_info(struct net_device *ndev)
3869 {
3870         struct ql_adapter *qdev = netdev_priv(ndev);
3871
3872         netif_info(qdev, probe, qdev->ndev,
3873                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3874                    "XG Roll = %d, XG Rev = %d.\n",
3875                    qdev->func,
3876                    qdev->port,
3877                    qdev->chip_rev_id & 0x0000000f,
3878                    qdev->chip_rev_id >> 4 & 0x0000000f,
3879                    qdev->chip_rev_id >> 8 & 0x0000000f,
3880                    qdev->chip_rev_id >> 12 & 0x0000000f);
3881         netif_info(qdev, probe, qdev->ndev,
3882                    "MAC address %pM\n", ndev->dev_addr);
3883 }
3884
3885 static int ql_wol(struct ql_adapter *qdev)
3886 {
3887         int status = 0;
3888         u32 wol = MB_WOL_DISABLE;
3889
3890         /* The CAM is still intact after a reset, but if we
3891          * are doing WOL, then we may need to program the
3892          * routing regs. We would also need to issue the mailbox
3893          * commands to instruct the MPI what to do per the ethtool
3894          * settings.
3895          */
3896
3897         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3898                         WAKE_MCAST | WAKE_BCAST)) {
3899                 netif_err(qdev, ifdown, qdev->ndev,
3900                           "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3901                           qdev->wol);
3902                 return -EINVAL;
3903         }
3904
3905         if (qdev->wol & WAKE_MAGIC) {
3906                 status = ql_mb_wol_set_magic(qdev, 1);
3907                 if (status) {
3908                         netif_err(qdev, ifdown, qdev->ndev,
3909                                   "Failed to set magic packet on %s.\n",
3910                                   qdev->ndev->name);
3911                         return status;
3912                 } else
3913                         netif_info(qdev, drv, qdev->ndev,
3914                                    "Enabled magic packet successfully on %s.\n",
3915                                    qdev->ndev->name);
3916
3917                 wol |= MB_WOL_MAGIC_PKT;
3918         }
3919
3920         if (qdev->wol) {
3921                 wol |= MB_WOL_MODE_ON;
3922                 status = ql_mb_wol_mode(qdev, wol);
3923                 netif_err(qdev, drv, qdev->ndev,
3924                           "WOL %s (wol code 0x%x) on %s\n",
3925                           (status == 0) ? "Successfully set" : "Failed",
3926                           wol, qdev->ndev->name);
3927         }
3928
3929         return status;
3930 }
3931
3932 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3933 {
3934
3935         /* Don't kill the reset worker thread if we
3936          * are in the process of recovery.
3937          */
3938         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3939                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3940         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3941         cancel_delayed_work_sync(&qdev->mpi_work);
3942         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3943         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3944         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3945 }
3946
3947 static int ql_adapter_down(struct ql_adapter *qdev)
3948 {
3949         int i, status = 0;
3950
3951         ql_link_off(qdev);
3952
3953         ql_cancel_all_work_sync(qdev);
3954
3955         for (i = 0; i < qdev->rss_ring_count; i++)
3956                 napi_disable(&qdev->rx_ring[i].napi);
3957
3958         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3959
3960         ql_disable_interrupts(qdev);
3961
3962         ql_tx_ring_clean(qdev);
3963
3964         /* Call netif_napi_del() from common point.
3965          */
3966         for (i = 0; i < qdev->rss_ring_count; i++)
3967                 netif_napi_del(&qdev->rx_ring[i].napi);
3968
3969         status = ql_adapter_reset(qdev);
3970         if (status)
3971                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3972                           qdev->func);
3973         ql_free_rx_buffers(qdev);
3974
3975         return status;
3976 }
3977
3978 static int ql_adapter_up(struct ql_adapter *qdev)
3979 {
3980         int err = 0;
3981
3982         err = ql_adapter_initialize(qdev);
3983         if (err) {
3984                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3985                 goto err_init;
3986         }
3987         set_bit(QL_ADAPTER_UP, &qdev->flags);
3988         ql_alloc_rx_buffers(qdev);
3989         /* If the port is initialized and the
3990          * link is up the turn on the carrier.
3991          */
3992         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3993                         (ql_read32(qdev, STS) & qdev->port_link_up))
3994                 ql_link_on(qdev);
3995         /* Restore rx mode. */
3996         clear_bit(QL_ALLMULTI, &qdev->flags);
3997         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3998         qlge_set_multicast_list(qdev->ndev);
3999
4000         /* Restore vlan setting. */
4001         qlge_restore_vlan(qdev);
4002
4003         ql_enable_interrupts(qdev);
4004         ql_enable_all_completion_interrupts(qdev);
4005         netif_tx_start_all_queues(qdev->ndev);
4006
4007         return 0;
4008 err_init:
4009         ql_adapter_reset(qdev);
4010         return err;
4011 }
4012
4013 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4014 {
4015         ql_free_mem_resources(qdev);
4016         ql_free_irq(qdev);
4017 }
4018
4019 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4020 {
4021         int status = 0;
4022
4023         if (ql_alloc_mem_resources(qdev)) {
4024                 netif_err(qdev, ifup, qdev->ndev, "Unable to  allocate memory.\n");
4025                 return -ENOMEM;
4026         }
4027         status = ql_request_irq(qdev);
4028         return status;
4029 }
4030
4031 static int qlge_close(struct net_device *ndev)
4032 {
4033         struct ql_adapter *qdev = netdev_priv(ndev);
4034
4035         /* If we hit pci_channel_io_perm_failure
4036          * failure condition, then we already
4037          * brought the adapter down.
4038          */
4039         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4040                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4041                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4042                 return 0;
4043         }
4044
4045         /*
4046          * Wait for device to recover from a reset.
4047          * (Rarely happens, but possible.)
4048          */
4049         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4050                 msleep(1);
4051         ql_adapter_down(qdev);
4052         ql_release_adapter_resources(qdev);
4053         return 0;
4054 }
4055
4056 static int ql_configure_rings(struct ql_adapter *qdev)
4057 {
4058         int i;
4059         struct rx_ring *rx_ring;
4060         struct tx_ring *tx_ring;
4061         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4062         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4063                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4064
4065         qdev->lbq_buf_order = get_order(lbq_buf_len);
4066
4067         /* In a perfect world we have one RSS ring for each CPU
4068          * and each has it's own vector.  To do that we ask for
4069          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4070          * vector count to what we actually get.  We then
4071          * allocate an RSS ring for each.
4072          * Essentially, we are doing min(cpu_count, msix_vector_count).
4073          */
4074         qdev->intr_count = cpu_cnt;
4075         ql_enable_msix(qdev);
4076         /* Adjust the RSS ring count to the actual vector count. */
4077         qdev->rss_ring_count = qdev->intr_count;
4078         qdev->tx_ring_count = cpu_cnt;
4079         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4080
4081         for (i = 0; i < qdev->tx_ring_count; i++) {
4082                 tx_ring = &qdev->tx_ring[i];
4083                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4084                 tx_ring->qdev = qdev;
4085                 tx_ring->wq_id = i;
4086                 tx_ring->wq_len = qdev->tx_ring_size;
4087                 tx_ring->wq_size =
4088                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4089
4090                 /*
4091                  * The completion queue ID for the tx rings start
4092                  * immediately after the rss rings.
4093                  */
4094                 tx_ring->cq_id = qdev->rss_ring_count + i;
4095         }
4096
4097         for (i = 0; i < qdev->rx_ring_count; i++) {
4098                 rx_ring = &qdev->rx_ring[i];
4099                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4100                 rx_ring->qdev = qdev;
4101                 rx_ring->cq_id = i;
4102                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4103                 if (i < qdev->rss_ring_count) {
4104                         /*
4105                          * Inbound (RSS) queues.
4106                          */
4107                         rx_ring->cq_len = qdev->rx_ring_size;
4108                         rx_ring->cq_size =
4109                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4110                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4111                         rx_ring->lbq_size =
4112                             rx_ring->lbq_len * sizeof(__le64);
4113                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4114                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4115                                      "lbq_buf_size %d, order = %d\n",
4116                                      rx_ring->lbq_buf_size,
4117                                      qdev->lbq_buf_order);
4118                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4119                         rx_ring->sbq_size =
4120                             rx_ring->sbq_len * sizeof(__le64);
4121                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4122                         rx_ring->type = RX_Q;
4123                 } else {
4124                         /*
4125                          * Outbound queue handles outbound completions only.
4126                          */
4127                         /* outbound cq is same size as tx_ring it services. */
4128                         rx_ring->cq_len = qdev->tx_ring_size;
4129                         rx_ring->cq_size =
4130                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4131                         rx_ring->lbq_len = 0;
4132                         rx_ring->lbq_size = 0;
4133                         rx_ring->lbq_buf_size = 0;
4134                         rx_ring->sbq_len = 0;
4135                         rx_ring->sbq_size = 0;
4136                         rx_ring->sbq_buf_size = 0;
4137                         rx_ring->type = TX_Q;
4138                 }
4139         }
4140         return 0;
4141 }
4142
4143 static int qlge_open(struct net_device *ndev)
4144 {
4145         int err = 0;
4146         struct ql_adapter *qdev = netdev_priv(ndev);
4147
4148         err = ql_adapter_reset(qdev);
4149         if (err)
4150                 return err;
4151
4152         err = ql_configure_rings(qdev);
4153         if (err)
4154                 return err;
4155
4156         err = ql_get_adapter_resources(qdev);
4157         if (err)
4158                 goto error_up;
4159
4160         err = ql_adapter_up(qdev);
4161         if (err)
4162                 goto error_up;
4163
4164         return err;
4165
4166 error_up:
4167         ql_release_adapter_resources(qdev);
4168         return err;
4169 }
4170
4171 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4172 {
4173         struct rx_ring *rx_ring;
4174         int i, status;
4175         u32 lbq_buf_len;
4176
4177         /* Wait for an outstanding reset to complete. */
4178         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4179                 int i = 3;
4180                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4181                         netif_err(qdev, ifup, qdev->ndev,
4182                                   "Waiting for adapter UP...\n");
4183                         ssleep(1);
4184                 }
4185
4186                 if (!i) {
4187                         netif_err(qdev, ifup, qdev->ndev,
4188                                   "Timed out waiting for adapter UP\n");
4189                         return -ETIMEDOUT;
4190                 }
4191         }
4192
4193         status = ql_adapter_down(qdev);
4194         if (status)
4195                 goto error;
4196
4197         /* Get the new rx buffer size. */
4198         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4199                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4200         qdev->lbq_buf_order = get_order(lbq_buf_len);
4201
4202         for (i = 0; i < qdev->rss_ring_count; i++) {
4203                 rx_ring = &qdev->rx_ring[i];
4204                 /* Set the new size. */
4205                 rx_ring->lbq_buf_size = lbq_buf_len;
4206         }
4207
4208         status = ql_adapter_up(qdev);
4209         if (status)
4210                 goto error;
4211
4212         return status;
4213 error:
4214         netif_alert(qdev, ifup, qdev->ndev,
4215                     "Driver up/down cycle failed, closing device.\n");
4216         set_bit(QL_ADAPTER_UP, &qdev->flags);
4217         dev_close(qdev->ndev);
4218         return status;
4219 }
4220
4221 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4222 {
4223         struct ql_adapter *qdev = netdev_priv(ndev);
4224         int status;
4225
4226         if (ndev->mtu == 1500 && new_mtu == 9000) {
4227                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4228         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4229                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4230         } else
4231                 return -EINVAL;
4232
4233         queue_delayed_work(qdev->workqueue,
4234                         &qdev->mpi_port_cfg_work, 3*HZ);
4235
4236         ndev->mtu = new_mtu;
4237
4238         if (!netif_running(qdev->ndev)) {
4239                 return 0;
4240         }
4241
4242         status = ql_change_rx_buffers(qdev);
4243         if (status) {
4244                 netif_err(qdev, ifup, qdev->ndev,
4245                           "Changing MTU failed.\n");
4246         }
4247
4248         return status;
4249 }
4250
4251 static struct net_device_stats *qlge_get_stats(struct net_device
4252                                                *ndev)
4253 {
4254         struct ql_adapter *qdev = netdev_priv(ndev);
4255         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4256         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4257         unsigned long pkts, mcast, dropped, errors, bytes;
4258         int i;
4259
4260         /* Get RX stats. */
4261         pkts = mcast = dropped = errors = bytes = 0;
4262         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4263                         pkts += rx_ring->rx_packets;
4264                         bytes += rx_ring->rx_bytes;
4265                         dropped += rx_ring->rx_dropped;
4266                         errors += rx_ring->rx_errors;
4267                         mcast += rx_ring->rx_multicast;
4268         }
4269         ndev->stats.rx_packets = pkts;
4270         ndev->stats.rx_bytes = bytes;
4271         ndev->stats.rx_dropped = dropped;
4272         ndev->stats.rx_errors = errors;
4273         ndev->stats.multicast = mcast;
4274
4275         /* Get TX stats. */
4276         pkts = errors = bytes = 0;
4277         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4278                         pkts += tx_ring->tx_packets;
4279                         bytes += tx_ring->tx_bytes;
4280                         errors += tx_ring->tx_errors;
4281         }
4282         ndev->stats.tx_packets = pkts;
4283         ndev->stats.tx_bytes = bytes;
4284         ndev->stats.tx_errors = errors;
4285         return &ndev->stats;
4286 }
4287
4288 static void qlge_set_multicast_list(struct net_device *ndev)
4289 {
4290         struct ql_adapter *qdev = netdev_priv(ndev);
4291         struct netdev_hw_addr *ha;
4292         int i, status;
4293
4294         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4295         if (status)
4296                 return;
4297         /*
4298          * Set or clear promiscuous mode if a
4299          * transition is taking place.
4300          */
4301         if (ndev->flags & IFF_PROMISC) {
4302                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4303                         if (ql_set_routing_reg
4304                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4305                                 netif_err(qdev, hw, qdev->ndev,
4306                                           "Failed to set promiscuous mode.\n");
4307                         } else {
4308                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4309                         }
4310                 }
4311         } else {
4312                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4313                         if (ql_set_routing_reg
4314                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4315                                 netif_err(qdev, hw, qdev->ndev,
4316                                           "Failed to clear promiscuous mode.\n");
4317                         } else {
4318                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4319                         }
4320                 }
4321         }
4322
4323         /*
4324          * Set or clear all multicast mode if a
4325          * transition is taking place.
4326          */
4327         if ((ndev->flags & IFF_ALLMULTI) ||
4328             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4329                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4330                         if (ql_set_routing_reg
4331                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4332                                 netif_err(qdev, hw, qdev->ndev,
4333                                           "Failed to set all-multi mode.\n");
4334                         } else {
4335                                 set_bit(QL_ALLMULTI, &qdev->flags);
4336                         }
4337                 }
4338         } else {
4339                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4340                         if (ql_set_routing_reg
4341                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4342                                 netif_err(qdev, hw, qdev->ndev,
4343                                           "Failed to clear all-multi mode.\n");
4344                         } else {
4345                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4346                         }
4347                 }
4348         }
4349
4350         if (!netdev_mc_empty(ndev)) {
4351                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4352                 if (status)
4353                         goto exit;
4354                 i = 0;
4355                 netdev_for_each_mc_addr(ha, ndev) {
4356                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4357                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4358                                 netif_err(qdev, hw, qdev->ndev,
4359                                           "Failed to loadmulticast address.\n");
4360                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4361                                 goto exit;
4362                         }
4363                         i++;
4364                 }
4365                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4366                 if (ql_set_routing_reg
4367                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4368                         netif_err(qdev, hw, qdev->ndev,
4369                                   "Failed to set multicast match mode.\n");
4370                 } else {
4371                         set_bit(QL_ALLMULTI, &qdev->flags);
4372                 }
4373         }
4374 exit:
4375         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4376 }
4377
4378 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4379 {
4380         struct ql_adapter *qdev = netdev_priv(ndev);
4381         struct sockaddr *addr = p;
4382         int status;
4383
4384         if (!is_valid_ether_addr(addr->sa_data))
4385                 return -EADDRNOTAVAIL;
4386         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4387         /* Update local copy of current mac address. */
4388         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4389
4390         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4391         if (status)
4392                 return status;
4393         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4394                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4395         if (status)
4396                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4397         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4398         return status;
4399 }
4400
4401 static void qlge_tx_timeout(struct net_device *ndev)
4402 {
4403         struct ql_adapter *qdev = netdev_priv(ndev);
4404         ql_queue_asic_error(qdev);
4405 }
4406
4407 static void ql_asic_reset_work(struct work_struct *work)
4408 {
4409         struct ql_adapter *qdev =
4410             container_of(work, struct ql_adapter, asic_reset_work.work);
4411         int status;
4412         rtnl_lock();
4413         status = ql_adapter_down(qdev);
4414         if (status)
4415                 goto error;
4416
4417         status = ql_adapter_up(qdev);
4418         if (status)
4419                 goto error;
4420
4421         /* Restore rx mode. */
4422         clear_bit(QL_ALLMULTI, &qdev->flags);
4423         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4424         qlge_set_multicast_list(qdev->ndev);
4425
4426         rtnl_unlock();
4427         return;
4428 error:
4429         netif_alert(qdev, ifup, qdev->ndev,
4430                     "Driver up/down cycle failed, closing device\n");
4431
4432         set_bit(QL_ADAPTER_UP, &qdev->flags);
4433         dev_close(qdev->ndev);
4434         rtnl_unlock();
4435 }
4436
4437 static const struct nic_operations qla8012_nic_ops = {
4438         .get_flash              = ql_get_8012_flash_params,
4439         .port_initialize        = ql_8012_port_initialize,
4440 };
4441
4442 static const struct nic_operations qla8000_nic_ops = {
4443         .get_flash              = ql_get_8000_flash_params,
4444         .port_initialize        = ql_8000_port_initialize,
4445 };
4446
4447 /* Find the pcie function number for the other NIC
4448  * on this chip.  Since both NIC functions share a
4449  * common firmware we have the lowest enabled function
4450  * do any common work.  Examples would be resetting
4451  * after a fatal firmware error, or doing a firmware
4452  * coredump.
4453  */
4454 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4455 {
4456         int status = 0;
4457         u32 temp;
4458         u32 nic_func1, nic_func2;
4459
4460         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4461                         &temp);
4462         if (status)
4463                 return status;
4464
4465         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4466                         MPI_TEST_NIC_FUNC_MASK);
4467         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4468                         MPI_TEST_NIC_FUNC_MASK);
4469
4470         if (qdev->func == nic_func1)
4471                 qdev->alt_func = nic_func2;
4472         else if (qdev->func == nic_func2)
4473                 qdev->alt_func = nic_func1;
4474         else
4475                 status = -EIO;
4476
4477         return status;
4478 }
4479
4480 static int ql_get_board_info(struct ql_adapter *qdev)
4481 {
4482         int status;
4483         qdev->func =
4484             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4485         if (qdev->func > 3)
4486                 return -EIO;
4487
4488         status = ql_get_alt_pcie_func(qdev);
4489         if (status)
4490                 return status;
4491
4492         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4493         if (qdev->port) {
4494                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4495                 qdev->port_link_up = STS_PL1;
4496                 qdev->port_init = STS_PI1;
4497                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4498                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4499         } else {
4500                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4501                 qdev->port_link_up = STS_PL0;
4502                 qdev->port_init = STS_PI0;
4503                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4504                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4505         }
4506         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4507         qdev->device_id = qdev->pdev->device;
4508         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4509                 qdev->nic_ops = &qla8012_nic_ops;
4510         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4511                 qdev->nic_ops = &qla8000_nic_ops;
4512         return status;
4513 }
4514
4515 static void ql_release_all(struct pci_dev *pdev)
4516 {
4517         struct net_device *ndev = pci_get_drvdata(pdev);
4518         struct ql_adapter *qdev = netdev_priv(ndev);
4519
4520         if (qdev->workqueue) {
4521                 destroy_workqueue(qdev->workqueue);
4522                 qdev->workqueue = NULL;
4523         }
4524
4525         if (qdev->reg_base)
4526                 iounmap(qdev->reg_base);
4527         if (qdev->doorbell_area)
4528                 iounmap(qdev->doorbell_area);
4529         vfree(qdev->mpi_coredump);
4530         pci_release_regions(pdev);
4531         pci_set_drvdata(pdev, NULL);
4532 }
4533
4534 static int __devinit ql_init_device(struct pci_dev *pdev,
4535                                     struct net_device *ndev, int cards_found)
4536 {
4537         struct ql_adapter *qdev = netdev_priv(ndev);
4538         int err = 0;
4539
4540         memset((void *)qdev, 0, sizeof(*qdev));
4541         err = pci_enable_device(pdev);
4542         if (err) {
4543                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4544                 return err;
4545         }
4546
4547         qdev->ndev = ndev;
4548         qdev->pdev = pdev;
4549         pci_set_drvdata(pdev, ndev);
4550
4551         /* Set PCIe read request size */
4552         err = pcie_set_readrq(pdev, 4096);
4553         if (err) {
4554                 dev_err(&pdev->dev, "Set readrq failed.\n");
4555                 goto err_out1;
4556         }
4557
4558         err = pci_request_regions(pdev, DRV_NAME);
4559         if (err) {
4560                 dev_err(&pdev->dev, "PCI region request failed.\n");
4561                 return err;
4562         }
4563
4564         pci_set_master(pdev);
4565         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4566                 set_bit(QL_DMA64, &qdev->flags);
4567                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4568         } else {
4569                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4570                 if (!err)
4571                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4572         }
4573
4574         if (err) {
4575                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4576                 goto err_out2;
4577         }
4578
4579         /* Set PCIe reset type for EEH to fundamental. */
4580         pdev->needs_freset = 1;
4581         pci_save_state(pdev);
4582         qdev->reg_base =
4583             ioremap_nocache(pci_resource_start(pdev, 1),
4584                             pci_resource_len(pdev, 1));
4585         if (!qdev->reg_base) {
4586                 dev_err(&pdev->dev, "Register mapping failed.\n");
4587                 err = -ENOMEM;
4588                 goto err_out2;
4589         }
4590
4591         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4592         qdev->doorbell_area =
4593             ioremap_nocache(pci_resource_start(pdev, 3),
4594                             pci_resource_len(pdev, 3));
4595         if (!qdev->doorbell_area) {
4596                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4597                 err = -ENOMEM;
4598                 goto err_out2;
4599         }
4600
4601         err = ql_get_board_info(qdev);
4602         if (err) {
4603                 dev_err(&pdev->dev, "Register access failed.\n");
4604                 err = -EIO;
4605                 goto err_out2;
4606         }
4607         qdev->msg_enable = netif_msg_init(debug, default_msg);
4608         spin_lock_init(&qdev->hw_lock);
4609         spin_lock_init(&qdev->stats_lock);
4610
4611         if (qlge_mpi_coredump) {
4612                 qdev->mpi_coredump =
4613                         vmalloc(sizeof(struct ql_mpi_coredump));
4614                 if (qdev->mpi_coredump == NULL) {
4615                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4616                         err = -ENOMEM;
4617                         goto err_out2;
4618                 }
4619                 if (qlge_force_coredump)
4620                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4621         }
4622         /* make sure the EEPROM is good */
4623         err = qdev->nic_ops->get_flash(qdev);
4624         if (err) {
4625                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4626                 goto err_out2;
4627         }
4628
4629         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4630         /* Keep local copy of current mac address. */
4631         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4632
4633         /* Set up the default ring sizes. */
4634         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4635         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4636
4637         /* Set up the coalescing parameters. */
4638         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4639         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4640         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4641         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4642
4643         /*
4644          * Set up the operating parameters.
4645          */
4646         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4647         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4648         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4649         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4650         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4651         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4652         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4653         init_completion(&qdev->ide_completion);
4654         mutex_init(&qdev->mpi_mutex);
4655
4656         if (!cards_found) {
4657                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4658                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4659                          DRV_NAME, DRV_VERSION);
4660         }
4661         return 0;
4662 err_out2:
4663         ql_release_all(pdev);
4664 err_out1:
4665         pci_disable_device(pdev);
4666         return err;
4667 }
4668
4669 static const struct net_device_ops qlge_netdev_ops = {
4670         .ndo_open               = qlge_open,
4671         .ndo_stop               = qlge_close,
4672         .ndo_start_xmit         = qlge_send,
4673         .ndo_change_mtu         = qlge_change_mtu,
4674         .ndo_get_stats          = qlge_get_stats,
4675         .ndo_set_rx_mode        = qlge_set_multicast_list,
4676         .ndo_set_mac_address    = qlge_set_mac_address,
4677         .ndo_validate_addr      = eth_validate_addr,
4678         .ndo_tx_timeout         = qlge_tx_timeout,
4679         .ndo_fix_features       = qlge_fix_features,
4680         .ndo_set_features       = qlge_set_features,
4681         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4682         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4683 };
4684
4685 static void ql_timer(unsigned long data)
4686 {
4687         struct ql_adapter *qdev = (struct ql_adapter *)data;
4688         u32 var = 0;
4689
4690         var = ql_read32(qdev, STS);
4691         if (pci_channel_offline(qdev->pdev)) {
4692                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4693                 return;
4694         }
4695
4696         mod_timer(&qdev->timer, jiffies + (5*HZ));
4697 }
4698
4699 static int __devinit qlge_probe(struct pci_dev *pdev,
4700                                 const struct pci_device_id *pci_entry)
4701 {
4702         struct net_device *ndev = NULL;
4703         struct ql_adapter *qdev = NULL;
4704         static int cards_found = 0;
4705         int err = 0;
4706
4707         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4708                         min(MAX_CPUS, (int)num_online_cpus()));
4709         if (!ndev)
4710                 return -ENOMEM;
4711
4712         err = ql_init_device(pdev, ndev, cards_found);
4713         if (err < 0) {
4714                 free_netdev(ndev);
4715                 return err;
4716         }
4717
4718         qdev = netdev_priv(ndev);
4719         SET_NETDEV_DEV(ndev, &pdev->dev);
4720         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4721                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4722                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4723         ndev->features = ndev->hw_features |
4724                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4725
4726         if (test_bit(QL_DMA64, &qdev->flags))
4727                 ndev->features |= NETIF_F_HIGHDMA;
4728
4729         /*
4730          * Set up net_device structure.
4731          */
4732         ndev->tx_queue_len = qdev->tx_ring_size;
4733         ndev->irq = pdev->irq;
4734
4735         ndev->netdev_ops = &qlge_netdev_ops;
4736         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4737         ndev->watchdog_timeo = 10 * HZ;
4738
4739         err = register_netdev(ndev);
4740         if (err) {
4741                 dev_err(&pdev->dev, "net device registration failed.\n");
4742                 ql_release_all(pdev);
4743                 pci_disable_device(pdev);
4744                 return err;
4745         }
4746         /* Start up the timer to trigger EEH if
4747          * the bus goes dead
4748          */
4749         init_timer_deferrable(&qdev->timer);
4750         qdev->timer.data = (unsigned long)qdev;
4751         qdev->timer.function = ql_timer;
4752         qdev->timer.expires = jiffies + (5*HZ);
4753         add_timer(&qdev->timer);
4754         ql_link_off(qdev);
4755         ql_display_dev_info(ndev);
4756         atomic_set(&qdev->lb_count, 0);
4757         cards_found++;
4758         return 0;
4759 }
4760
4761 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4762 {
4763         return qlge_send(skb, ndev);
4764 }
4765
4766 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4767 {
4768         return ql_clean_inbound_rx_ring(rx_ring, budget);
4769 }
4770
4771 static void __devexit qlge_remove(struct pci_dev *pdev)
4772 {
4773         struct net_device *ndev = pci_get_drvdata(pdev);
4774         struct ql_adapter *qdev = netdev_priv(ndev);
4775         del_timer_sync(&qdev->timer);
4776         ql_cancel_all_work_sync(qdev);
4777         unregister_netdev(ndev);
4778         ql_release_all(pdev);
4779         pci_disable_device(pdev);
4780         free_netdev(ndev);
4781 }
4782
4783 /* Clean up resources without touching hardware. */
4784 static void ql_eeh_close(struct net_device *ndev)
4785 {
4786         int i;
4787         struct ql_adapter *qdev = netdev_priv(ndev);
4788
4789         if (netif_carrier_ok(ndev)) {
4790                 netif_carrier_off(ndev);
4791                 netif_stop_queue(ndev);
4792         }
4793
4794         /* Disabling the timer */
4795         del_timer_sync(&qdev->timer);
4796         ql_cancel_all_work_sync(qdev);
4797
4798         for (i = 0; i < qdev->rss_ring_count; i++)
4799                 netif_napi_del(&qdev->rx_ring[i].napi);
4800
4801         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4802         ql_tx_ring_clean(qdev);
4803         ql_free_rx_buffers(qdev);
4804         ql_release_adapter_resources(qdev);
4805 }
4806
4807 /*
4808  * This callback is called by the PCI subsystem whenever
4809  * a PCI bus error is detected.
4810  */
4811 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4812                                                enum pci_channel_state state)
4813 {
4814         struct net_device *ndev = pci_get_drvdata(pdev);
4815         struct ql_adapter *qdev = netdev_priv(ndev);
4816
4817         switch (state) {
4818         case pci_channel_io_normal:
4819                 return PCI_ERS_RESULT_CAN_RECOVER;
4820         case pci_channel_io_frozen:
4821                 netif_device_detach(ndev);
4822                 if (netif_running(ndev))
4823                         ql_eeh_close(ndev);
4824                 pci_disable_device(pdev);
4825                 return PCI_ERS_RESULT_NEED_RESET;
4826         case pci_channel_io_perm_failure:
4827                 dev_err(&pdev->dev,
4828                         "%s: pci_channel_io_perm_failure.\n", __func__);
4829                 ql_eeh_close(ndev);
4830                 set_bit(QL_EEH_FATAL, &qdev->flags);
4831                 return PCI_ERS_RESULT_DISCONNECT;
4832         }
4833
4834         /* Request a slot reset. */
4835         return PCI_ERS_RESULT_NEED_RESET;
4836 }
4837
4838 /*
4839  * This callback is called after the PCI buss has been reset.
4840  * Basically, this tries to restart the card from scratch.
4841  * This is a shortened version of the device probe/discovery code,
4842  * it resembles the first-half of the () routine.
4843  */
4844 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4845 {
4846         struct net_device *ndev = pci_get_drvdata(pdev);
4847         struct ql_adapter *qdev = netdev_priv(ndev);
4848
4849         pdev->error_state = pci_channel_io_normal;
4850
4851         pci_restore_state(pdev);
4852         if (pci_enable_device(pdev)) {
4853                 netif_err(qdev, ifup, qdev->ndev,
4854                           "Cannot re-enable PCI device after reset.\n");
4855                 return PCI_ERS_RESULT_DISCONNECT;
4856         }
4857         pci_set_master(pdev);
4858
4859         if (ql_adapter_reset(qdev)) {
4860                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4861                 set_bit(QL_EEH_FATAL, &qdev->flags);
4862                 return PCI_ERS_RESULT_DISCONNECT;
4863         }
4864
4865         return PCI_ERS_RESULT_RECOVERED;
4866 }
4867
4868 static void qlge_io_resume(struct pci_dev *pdev)
4869 {
4870         struct net_device *ndev = pci_get_drvdata(pdev);
4871         struct ql_adapter *qdev = netdev_priv(ndev);
4872         int err = 0;
4873
4874         if (netif_running(ndev)) {
4875                 err = qlge_open(ndev);
4876                 if (err) {
4877                         netif_err(qdev, ifup, qdev->ndev,
4878                                   "Device initialization failed after reset.\n");
4879                         return;
4880                 }
4881         } else {
4882                 netif_err(qdev, ifup, qdev->ndev,
4883                           "Device was not running prior to EEH.\n");
4884         }
4885         mod_timer(&qdev->timer, jiffies + (5*HZ));
4886         netif_device_attach(ndev);
4887 }
4888
4889 static struct pci_error_handlers qlge_err_handler = {
4890         .error_detected = qlge_io_error_detected,
4891         .slot_reset = qlge_io_slot_reset,
4892         .resume = qlge_io_resume,
4893 };
4894
4895 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4896 {
4897         struct net_device *ndev = pci_get_drvdata(pdev);
4898         struct ql_adapter *qdev = netdev_priv(ndev);
4899         int err;
4900
4901         netif_device_detach(ndev);
4902         del_timer_sync(&qdev->timer);
4903
4904         if (netif_running(ndev)) {
4905                 err = ql_adapter_down(qdev);
4906                 if (!err)
4907                         return err;
4908         }
4909
4910         ql_wol(qdev);
4911         err = pci_save_state(pdev);
4912         if (err)
4913                 return err;
4914
4915         pci_disable_device(pdev);
4916
4917         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4918
4919         return 0;
4920 }
4921
4922 #ifdef CONFIG_PM
4923 static int qlge_resume(struct pci_dev *pdev)
4924 {
4925         struct net_device *ndev = pci_get_drvdata(pdev);
4926         struct ql_adapter *qdev = netdev_priv(ndev);
4927         int err;
4928
4929         pci_set_power_state(pdev, PCI_D0);
4930         pci_restore_state(pdev);
4931         err = pci_enable_device(pdev);
4932         if (err) {
4933                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4934                 return err;
4935         }
4936         pci_set_master(pdev);
4937
4938         pci_enable_wake(pdev, PCI_D3hot, 0);
4939         pci_enable_wake(pdev, PCI_D3cold, 0);
4940
4941         if (netif_running(ndev)) {
4942                 err = ql_adapter_up(qdev);
4943                 if (err)
4944                         return err;
4945         }
4946
4947         mod_timer(&qdev->timer, jiffies + (5*HZ));
4948         netif_device_attach(ndev);
4949
4950         return 0;
4951 }
4952 #endif /* CONFIG_PM */
4953
4954 static void qlge_shutdown(struct pci_dev *pdev)
4955 {
4956         qlge_suspend(pdev, PMSG_SUSPEND);
4957 }
4958
4959 static struct pci_driver qlge_driver = {
4960         .name = DRV_NAME,
4961         .id_table = qlge_pci_tbl,
4962         .probe = qlge_probe,
4963         .remove = __devexit_p(qlge_remove),
4964 #ifdef CONFIG_PM
4965         .suspend = qlge_suspend,
4966         .resume = qlge_resume,
4967 #endif
4968         .shutdown = qlge_shutdown,
4969         .err_handler = &qlge_err_handler
4970 };
4971
4972 static int __init qlge_init_module(void)
4973 {
4974         return pci_register_driver(&qlge_driver);
4975 }
4976
4977 static void __exit qlge_exit(void)
4978 {
4979         pci_unregister_driver(&qlge_driver);
4980 }
4981
4982 module_init(qlge_init_module);
4983 module_exit(qlge_exit);