1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - do not allocate memory.");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow forcing a firmware core dump. "
88                 "Default is OFF - do not allow.");
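
/* Illustrative load-time usage (an example, not part of the driver source;
 * the values shown are arbitrary):
 *
 *	modprobe qlge qlge_irq_type=2 qlge_mpi_coredump=1 debug=0x2007
 *
 * where debug is a bitmask of NETIF_MSG_* flags and -1 keeps the defaults
 * defined above.
 */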
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore provides exclusive access to
103  * resources shared among the NIC driver, MPI firmware,
104  * FCoE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask.\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
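/* A minimal usage sketch (illustrative only; it mirrors how the flash and
 * MAC address paths below use the hardware semaphore): take the semaphore,
 * touch the registers it protects, then release it.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access FLASH_ADDR/FLASH_DATA here ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */
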
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly during initialization,
163  * but is also used in kernel thread context such as
164  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x.\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status) {
235                 /* Don't leak the DMA mapping if the semaphore can't be taken. */
236                 pci_unmap_single(qdev->pdev, map, size, direction);
237                 return status;
238         }
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
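
/* Illustrative call (a sketch of how ring bringup code can use this helper;
 * the cqicb and cq_id names are assumptions, not definitions from this
 * excerpt):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
 *			      rx_ring->cq_id);
 *
 * CFG_LCQ is in the "load to device" group, so the block is DMA-mapped
 * to-device and CFG is polled until the load bit clears.
 */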
260
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378                         status =
379                             ql_wait_reg_rdy(qdev,
380                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381                         if (status)
382                                 goto exit;
383                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
385                                    type);       /* type */
386                         ql_write32(qdev, MAC_ADDR_DATA, lower);
387                         status =
388                             ql_wait_reg_rdy(qdev,
389                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390                         if (status)
391                                 goto exit;
392                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
394                                    type);       /* type */
395                         ql_write32(qdev, MAC_ADDR_DATA, upper);
396                         status =
397                             ql_wait_reg_rdy(qdev,
398                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399                         if (status)
400                                 goto exit;
401                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
402                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
403                                    type);       /* type */
404                         /* This field should also include the queue id
405                            and possibly the function id.  Right now we hardcode
406                            the route field to NIC core.
407                          */
408                         cam_output = (CAM_OUT_ROUTE_NIC |
409                                       (qdev->func << CAM_OUT_FUNC_SHIFT) |
410                                       (0 << CAM_OUT_CQ_ID_SHIFT));
412                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413                                 cam_output |= CAM_OUT_RV;
414                         /* route to NIC core */
415                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416                         break;
417                 }
418         case MAC_ADDR_TYPE_VLAN:
419                 {
420                         u32 enable_bit = *((u32 *) &addr[0]);
421                         /* For VLAN, the addr actually holds a bit that
422                          * either enables or disables the vlan id we are
423                          * addressing. It's either MAC_ADDR_E on or off.
424                          * That's bit-27 we're talking about.
425                          */
426                         status =
427                             ql_wait_reg_rdy(qdev,
428                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429                         if (status)
430                                 goto exit;
431                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
433                                    type |       /* type */
434                                    enable_bit); /* enable/disable */
435                         break;
436                 }
437         case MAC_ADDR_TYPE_MULTI_FLTR:
438         default:
439                 netif_crit(qdev, ifup, qdev->ndev,
440                            "Address type %d not yet supported.\n", type);
441                 status = -EPERM;
442         }
443 exit:
444         return status;
445 }
446
447 /* Set or clear MAC address in hardware. We sometimes
448  * have to clear it to prevent wrong frame routing
449  * especially in a bonding environment.
450  */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453         int status;
454         char zero_mac_addr[ETH_ALEN];
455         char *addr;
456
457         if (set) {
458                 addr = &qdev->current_mac_addr[0];
459                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460                              "Set Mac addr %pM\n", addr);
461         } else {
462                 memset(zero_mac_addr, 0, ETH_ALEN);
463                 addr = &zero_mac_addr[0];
464                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465                              "Clearing MAC address\n");
466         }
467         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468         if (status)
469                 return status;
470         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473         if (status)
474                 netif_err(qdev, ifup, qdev->ndev,
475                           "Failed to init mac address.\n");
476         return status;
477 }
478
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482         netif_carrier_on(qdev->ndev);
483         ql_set_mac_addr(qdev, 1);
484 }
485
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489         netif_carrier_off(qdev->ndev);
490         ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498         int status = 0;
499
500         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501         if (status)
502                 goto exit;
503
504         ql_write32(qdev, RT_IDX,
505                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507         if (status)
508                 goto exit;
509         *value = ql_read32(qdev, RT_DATA);
510 exit:
511         return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520                               int enable)
521 {
522         int status = -EINVAL; /* Return error if no mask match. */
523         u32 value = 0;
524
525         switch (mask) {
526         case RT_IDX_CAM_HIT:
527                 {
528                         value = RT_IDX_DST_CAM_Q |      /* dest */
529                             RT_IDX_TYPE_NICQ |  /* type */
530                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531                         break;
532                 }
533         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
534                 {
535                         value = RT_IDX_DST_DFLT_Q |     /* dest */
536                             RT_IDX_TYPE_NICQ |  /* type */
537                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538                         break;
539                 }
540         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
541                 {
542                         value = RT_IDX_DST_DFLT_Q |     /* dest */
543                             RT_IDX_TYPE_NICQ |  /* type */
544                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545                         break;
546                 }
547         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548                 {
549                         value = RT_IDX_DST_DFLT_Q | /* dest */
550                                 RT_IDX_TYPE_NICQ | /* type */
551                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
552                                 RT_IDX_IDX_SHIFT); /* index */
553                         break;
554                 }
555         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556                 {
557                         value = RT_IDX_DST_DFLT_Q | /* dest */
558                                 RT_IDX_TYPE_NICQ | /* type */
559                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560                                 RT_IDX_IDX_SHIFT); /* index */
561                         break;
562                 }
563         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
564                 {
565                         value = RT_IDX_DST_DFLT_Q |     /* dest */
566                             RT_IDX_TYPE_NICQ |  /* type */
567                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568                         break;
569                 }
570         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
571                 {
572                         value = RT_IDX_DST_DFLT_Q |     /* dest */
573                             RT_IDX_TYPE_NICQ |  /* type */
574                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575                         break;
576                 }
577         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
578                 {
579                         value = RT_IDX_DST_DFLT_Q |     /* dest */
580                             RT_IDX_TYPE_NICQ |  /* type */
581                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582                         break;
583                 }
584         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
585                 {
586                         value = RT_IDX_DST_RSS |        /* dest */
587                             RT_IDX_TYPE_NICQ |  /* type */
588                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589                         break;
590                 }
591         case 0:         /* Clear the E-bit on an entry. */
592                 {
593                         value = RT_IDX_DST_DFLT_Q |     /* dest */
594                             RT_IDX_TYPE_NICQ |  /* type */
595                             (index << RT_IDX_IDX_SHIFT);/* index */
596                         break;
597                 }
598         default:
599                 netif_err(qdev, ifup, qdev->ndev,
600                           "Mask type %d not yet supported.\n", mask);
601                 status = -EPERM;
602                 goto exit;
603         }
604
605         if (value) {
606                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607                 if (status)
608                         goto exit;
609                 value |= (enable ? RT_IDX_E : 0);
610                 ql_write32(qdev, RT_IDX, value);
611                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
612         }
613 exit:
614         return status;
615 }
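
/* Illustrative call pattern (a sketch, not an addition to the driver): an
 * entry is enabled by passing the slot index, the mask selecting the frame
 * type, and enable = 1, e.g.
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * Calling it again with enable = 0 clears the E-bit for that slot.
 */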
616
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628  * Otherwise, we may have multiple outstanding workers and don't want to
629  * enable until the last one finishes. In this case, the irq_cnt gets
630  * incremented every time we queue a worker and decremented every time
631  * a worker finishes.  Once it hits zero we enable the interrupt.
632  */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635         u32 var = 0;
636         unsigned long hw_flags = 0;
637         struct intr_context *ctx = qdev->intr_context + intr;
638
639         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640                 /* Always enable if we're MSIX multi interrupts and
641                  * it's not the default (zeroeth) interrupt.
642                  */
643                 ql_write32(qdev, INTR_EN,
644                            ctx->intr_en_mask);
645                 var = ql_read32(qdev, STS);
646                 return var;
647         }
648
649         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650         if (atomic_dec_and_test(&ctx->irq_cnt)) {
651                 ql_write32(qdev, INTR_EN,
652                            ctx->intr_en_mask);
653                 var = ql_read32(qdev, STS);
654         }
655         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656         return var;
657 }
658
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661         u32 var = 0;
662         struct intr_context *ctx;
663
664         /* HW disables for us if we're MSIX multi interrupts and
665          * it's not the default (zeroeth) interrupt.
666          */
667         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668                 return 0;
669
670         ctx = qdev->intr_context + intr;
671         spin_lock(&qdev->hw_lock);
672         if (!atomic_read(&ctx->irq_cnt)) {
673                 ql_write32(qdev, INTR_EN,
674                 ctx->intr_dis_mask);
675                 var = ql_read32(qdev, STS);
676         }
677         atomic_inc(&ctx->irq_cnt);
678         spin_unlock(&qdev->hw_lock);
679         return var;
680 }
681
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684         int i;
685         for (i = 0; i < qdev->intr_count; i++) {
686                 /* The enable call does an atomic_dec_and_test
687                  * and enables only if the result is zero.
688                  * So we precharge it here.
689                  */
690                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691                         i == 0))
692                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693                 ql_enable_completion_interrupt(qdev, i);
694         }
695
696 }
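
/* Worked example of the irq_cnt scheme described above (informational
 * comment only): with irq_cnt precharged to 1, a single
 * ql_enable_completion_interrupt() drops it to 0 and writes intr_en_mask;
 * each ql_disable_completion_interrupt() bumps it back up, so the interrupt
 * stays masked until a matching enable call brings the count back to 0.
 */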
697
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700         int status, i;
701         u16 csum = 0;
702         __le16 *flash = (__le16 *)&qdev->flash;
703
704         status = strncmp((char *)&qdev->flash, str, 4);
705         if (status) {
706                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707                 return  status;
708         }
709
710         for (i = 0; i < size; i++)
711                 csum += le16_to_cpu(*flash++);
712
713         if (csum)
714                 netif_err(qdev, ifup, qdev->ndev,
715                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716
717         return csum;
718 }
719
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722         int status = 0;
723         /* wait for reg to come ready */
724         status = ql_wait_reg_rdy(qdev,
725                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726         if (status)
727                 goto exit;
728         /* set up for reg read */
729         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730         /* wait for reg to come ready */
731         status = ql_wait_reg_rdy(qdev,
732                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733         if (status)
734                 goto exit;
735         /* This data is stored on flash as an array of
736          * __le32.  Since ql_read32() returns cpu endian
737          * we need to swap it back.
738          */
739         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741         return status;
742 }
743
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746         u32 i, size;
747         int status;
748         __le32 *p = (__le32 *)&qdev->flash;
749         u32 offset;
750         u8 mac_addr[6];
751
752         /* Get flash offset for function and adjust
753          * for dword access.
754          */
755         if (!qdev->port)
756                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757         else
758                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759
760         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761                 return -ETIMEDOUT;
762
763         size = sizeof(struct flash_params_8000) / sizeof(u32);
764         for (i = 0; i < size; i++, p++) {
765                 status = ql_read_flash_word(qdev, i+offset, p);
766                 if (status) {
767                         netif_err(qdev, ifup, qdev->ndev,
768                                   "Error reading flash.\n");
769                         goto exit;
770                 }
771         }
772
773         status = ql_validate_flash(qdev,
774                         sizeof(struct flash_params_8000) / sizeof(u16),
775                         "8000");
776         if (status) {
777                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778                 status = -EINVAL;
779                 goto exit;
780         }
781
782         /* Extract either manufacturer or BOFM modified
783          * MAC address.
784          */
785         if (qdev->flash.flash_params_8000.data_type1 == 2)
786                 memcpy(mac_addr,
787                         qdev->flash.flash_params_8000.mac_addr1,
788                         qdev->ndev->addr_len);
789         else
790                 memcpy(mac_addr,
791                         qdev->flash.flash_params_8000.mac_addr,
792                         qdev->ndev->addr_len);
793
794         if (!is_valid_ether_addr(mac_addr)) {
795                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796                 status = -EINVAL;
797                 goto exit;
798         }
799
800         memcpy(qdev->ndev->dev_addr,
801                 mac_addr,
802                 qdev->ndev->addr_len);
803
804 exit:
805         ql_sem_unlock(qdev, SEM_FLASH_MASK);
806         return status;
807 }
808
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811         int i;
812         int status;
813         __le32 *p = (__le32 *)&qdev->flash;
814         u32 offset = 0;
815         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816
817         /* Second function's parameters follow the first
818          * function's.
819          */
820         if (qdev->port)
821                 offset = size;
822
823         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824                 return -ETIMEDOUT;
825
826         for (i = 0; i < size; i++, p++) {
827                 status = ql_read_flash_word(qdev, i+offset, p);
828                 if (status) {
829                         netif_err(qdev, ifup, qdev->ndev,
830                                   "Error reading flash.\n");
831                         goto exit;
832                 }
833
834         }
835
836         status = ql_validate_flash(qdev,
837                         sizeof(struct flash_params_8012) / sizeof(u16),
838                         "8012");
839         if (status) {
840                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841                 status = -EINVAL;
842                 goto exit;
843         }
844
845         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846                 status = -EINVAL;
847                 goto exit;
848         }
849
850         memcpy(qdev->ndev->dev_addr,
851                 qdev->flash.flash_params_8012.mac_addr,
852                 qdev->ndev->addr_len);
853
854 exit:
855         ql_sem_unlock(qdev, SEM_FLASH_MASK);
856         return status;
857 }
858
859 /* xgmac registers are located behind the xgmac_addr and xgmac_data
860  * register pair.  Each read/write requires us to wait for the ready
861  * bit before reading/writing the data.
862  */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865         int status;
866         /* wait for reg to come ready */
867         status = ql_wait_reg_rdy(qdev,
868                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869         if (status)
870                 return status;
871         /* write the data to the data reg */
872         ql_write32(qdev, XGMAC_DATA, data);
873         /* trigger the write */
874         ql_write32(qdev, XGMAC_ADDR, reg);
875         return status;
876 }
877
878 /* xgmac registers are located behind the xgmac_addr and xgmac_data
879  * register pair.  Each read/write requires us to wait for the ready
880  * bit before reading/writing the data.
881  */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884         int status = 0;
885         /* wait for reg to come ready */
886         status = ql_wait_reg_rdy(qdev,
887                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888         if (status)
889                 goto exit;
890         /* set up for reg read */
891         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892         /* wait for reg to come ready */
893         status = ql_wait_reg_rdy(qdev,
894                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895         if (status)
896                 goto exit;
897         /* get the data */
898         *data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900         return status;
901 }
902
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906         int status = 0;
907         u32 hi = 0;
908         u32 lo = 0;
909
910         status = ql_read_xgmac_reg(qdev, reg, &lo);
911         if (status)
912                 goto exit;
913
914         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915         if (status)
916                 goto exit;
917
918         *data = (u64) lo | ((u64) hi << 32);
919
920 exit:
921         return status;
922 }
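
/* Sketch of a 64-bit statistics read (illustrative only; some_stat_reg is a
 * placeholder, not a real XGMAC register offset):
 *
 *	u64 val = 0;
 *
 *	if (ql_read_xgmac_reg64(qdev, some_stat_reg, &val) == 0)
 *		pr_debug("stat = %llu\n", (unsigned long long)val);
 *
 * The helper reads the low dword at 'reg' and the high dword at 'reg + 4'.
 */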
923
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926         int status;
927         /*
928          * Get MPI firmware version for driver banner
929          * and ethtool info.
930          */
931         status = ql_mb_about_fw(qdev);
932         if (status)
933                 goto exit;
934         status = ql_mb_get_fw_state(qdev);
935         if (status)
936                 goto exit;
937         /* Wake up a worker to get/set the TX/RX frame sizes. */
938         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940         return status;
941 }
942
943 /* Take the MAC Core out of reset.
944  * Enable statistics counting.
945  * Take the transmitter/receiver out of reset.
946  * This functionality may be done in the MPI firmware at a
947  * later date.
948  */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951         int status = 0;
952         u32 data;
953
954         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955                 /* Another function has the semaphore, so
956                  * wait for the port init bit to come ready.
957                  */
958                 netif_info(qdev, link, qdev->ndev,
959                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961                 if (status) {
962                         netif_crit(qdev, link, qdev->ndev,
963                                    "Port initialize timed out.\n");
964                 }
965                 return status;
966         }
967
968         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore.\n");
969         /* Set the core reset. */
970         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971         if (status)
972                 goto end;
973         data |= GLOBAL_CFG_RESET;
974         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975         if (status)
976                 goto end;
977
978         /* Clear the core reset and turn on jumbo for receiver. */
979         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
980         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
981         data |= GLOBAL_CFG_TX_STAT_EN;
982         data |= GLOBAL_CFG_RX_STAT_EN;
983         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984         if (status)
985                 goto end;
986
987         /* Enable the transmitter and clear its reset. */
988         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989         if (status)
990                 goto end;
991         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
992         data |= TX_CFG_EN;      /* Enable the transmitter. */
993         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994         if (status)
995                 goto end;
996
997         /* Enable the receiver and clear its reset. */
998         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999         if (status)
1000                 goto end;
1001         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1002         data |= RX_CFG_EN;      /* Enable the receiver. */
1003         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004         if (status)
1005                 goto end;
1006
1007         /* Turn on jumbo. */
1008         status =
1009             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010         if (status)
1011                 goto end;
1012         status =
1013             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014         if (status)
1015                 goto end;
1016
1017         /* Signal to the world that the port is enabled. */
1018         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021         return status;
1022 }
1023
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026         return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033         rx_ring->lbq_curr_idx++;
1034         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035                 rx_ring->lbq_curr_idx = 0;
1036         rx_ring->lbq_free_cnt++;
1037         return lbq_desc;
1038 }
1039
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041                 struct rx_ring *rx_ring)
1042 {
1043         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044
1045         pci_dma_sync_single_for_cpu(qdev->pdev,
1046                                         dma_unmap_addr(lbq_desc, mapaddr),
1047                                     rx_ring->lbq_buf_size,
1048                                         PCI_DMA_FROMDEVICE);
1049
1050         /* If it's the last chunk of our master page then
1051          * we unmap it.
1052          */
1053         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054                                         == ql_lbq_block_size(qdev))
1055                 pci_unmap_page(qdev->pdev,
1056                                 lbq_desc->p.pg_chunk.map,
1057                                 ql_lbq_block_size(qdev),
1058                                 PCI_DMA_FROMDEVICE);
1059         return lbq_desc;
1060 }
1061
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066         rx_ring->sbq_curr_idx++;
1067         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068                 rx_ring->sbq_curr_idx = 0;
1069         rx_ring->sbq_free_cnt++;
1070         return sbq_desc;
1071 }
1072
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076         rx_ring->cnsmr_idx++;
1077         rx_ring->curr_entry++;
1078         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079                 rx_ring->cnsmr_idx = 0;
1080                 rx_ring->curr_entry = rx_ring->cq_base;
1081         }
1082 }
1083
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090                                                 struct bq_desc *lbq_desc)
1091 {
1092         if (!rx_ring->pg_chunk.page) {
1093                 u64 map;
1094                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095                                                 GFP_ATOMIC,
1096                                                 qdev->lbq_buf_order);
1097                 if (unlikely(!rx_ring->pg_chunk.page)) {
1098                         netif_err(qdev, drv, qdev->ndev,
1099                                   "page allocation failed.\n");
1100                         return -ENOMEM;
1101                 }
1102                 rx_ring->pg_chunk.offset = 0;
1103                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104                                         0, ql_lbq_block_size(qdev),
1105                                         PCI_DMA_FROMDEVICE);
1106                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107                         __free_pages(rx_ring->pg_chunk.page,
1108                                         qdev->lbq_buf_order);
1109                         netif_err(qdev, drv, qdev->ndev,
1110                                   "PCI mapping failed.\n");
1111                         return -ENOMEM;
1112                 }
1113                 rx_ring->pg_chunk.map = map;
1114                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115         }
1116
1117         /* Copy the current master pg_chunk info
1118          * to the current descriptor.
1119          */
1120         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121
1122         /* Adjust the master page chunk for next
1123          * buffer get.
1124          */
1125         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127                 rx_ring->pg_chunk.page = NULL;
1128                 lbq_desc->p.pg_chunk.last_flag = 1;
1129         } else {
1130                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131                 get_page(rx_ring->pg_chunk.page);
1132                 lbq_desc->p.pg_chunk.last_flag = 0;
1133         }
1134         return 0;
1135 }
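
/* Worked example of the page-chunk scheme above (illustrative numbers only):
 * with PAGE_SIZE = 4096 and lbq_buf_order = 1, ql_lbq_block_size() is 8192
 * bytes; with lbq_buf_size = 2048 that master page is handed out as four
 * chunks at offsets 0, 2048, 4096 and 6144.  The page is unmapped once the
 * last chunk (offset + lbq_buf_size == block size) is consumed.
 */
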
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139         u32 clean_idx = rx_ring->lbq_clean_idx;
1140         u32 start_idx = clean_idx;
1141         struct bq_desc *lbq_desc;
1142         u64 map;
1143         int i;
1144
1145         while (rx_ring->lbq_free_cnt > 32) {
1146                 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148                                      "lbq: try cleaning clean_idx = %d.\n",
1149                                      clean_idx);
1150                         lbq_desc = &rx_ring->lbq[clean_idx];
1151                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152                                 rx_ring->lbq_clean_idx = clean_idx;
1153                                 netif_err(qdev, ifup, qdev->ndev,
1154                                                 "Could not get a page chunk, i=%d, clean_idx=%d.\n",
1155                                                 i, clean_idx);
1156                                 return;
1157                         }
1158
1159                         map = lbq_desc->p.pg_chunk.map +
1160                                 lbq_desc->p.pg_chunk.offset;
1161                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162                         dma_unmap_len_set(lbq_desc, maplen,
1163                                         rx_ring->lbq_buf_size);
1164                         *lbq_desc->addr = cpu_to_le64(map);
1165
1166                         pci_dma_sync_single_for_device(qdev->pdev, map,
1167                                                 rx_ring->lbq_buf_size,
1168                                                 PCI_DMA_FROMDEVICE);
1169                         clean_idx++;
1170                         if (clean_idx == rx_ring->lbq_len)
1171                                 clean_idx = 0;
1172                 }
1173
1174                 rx_ring->lbq_clean_idx = clean_idx;
1175                 rx_ring->lbq_prod_idx += 16;
1176                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177                         rx_ring->lbq_prod_idx = 0;
1178                 rx_ring->lbq_free_cnt -= 16;
1179         }
1180
1181         if (start_idx != clean_idx) {
1182                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                              "lbq: updating prod idx = %d.\n",
1184                              rx_ring->lbq_prod_idx);
1185                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186                                 rx_ring->lbq_prod_idx_db_reg);
1187         }
1188 }
1189
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193         u32 clean_idx = rx_ring->sbq_clean_idx;
1194         u32 start_idx = clean_idx;
1195         struct bq_desc *sbq_desc;
1196         u64 map;
1197         int i;
1198
1199         while (rx_ring->sbq_free_cnt > 16) {
1200                 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201                         sbq_desc = &rx_ring->sbq[clean_idx];
1202                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203                                      "sbq: try cleaning clean_idx = %d.\n",
1204                                      clean_idx);
1205                         if (sbq_desc->p.skb == NULL) {
1206                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1207                                              qdev->ndev,
1208                                              "sbq: getting new skb for index %d.\n",
1209                                              sbq_desc->index);
1210                                 sbq_desc->p.skb =
1211                                     netdev_alloc_skb(qdev->ndev,
1212                                                      SMALL_BUFFER_SIZE);
1213                                 if (sbq_desc->p.skb == NULL) {
1214                                         rx_ring->sbq_clean_idx = clean_idx;
1215                                         return;
1216                                 }
1217                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1218                                 map = pci_map_single(qdev->pdev,
1219                                                      sbq_desc->p.skb->data,
1220                                                      rx_ring->sbq_buf_size,
1221                                                      PCI_DMA_FROMDEVICE);
1222                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1223                                         netif_err(qdev, ifup, qdev->ndev,
1224                                                   "PCI mapping failed.\n");
1225                                         rx_ring->sbq_clean_idx = clean_idx;
1226                                         dev_kfree_skb_any(sbq_desc->p.skb);
1227                                         sbq_desc->p.skb = NULL;
1228                                         return;
1229                                 }
1230                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1231                                 dma_unmap_len_set(sbq_desc, maplen,
1232                                                   rx_ring->sbq_buf_size);
1233                                 *sbq_desc->addr = cpu_to_le64(map);
1234                         }
1235
1236                         clean_idx++;
1237                         if (clean_idx == rx_ring->sbq_len)
1238                                 clean_idx = 0;
1239                 }
1240                 rx_ring->sbq_clean_idx = clean_idx;
1241                 rx_ring->sbq_prod_idx += 16;
1242                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1243                         rx_ring->sbq_prod_idx = 0;
1244                 rx_ring->sbq_free_cnt -= 16;
1245         }
1246
1247         if (start_idx != clean_idx) {
1248                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1249                              "sbq: updating prod idx = %d.\n",
1250                              rx_ring->sbq_prod_idx);
1251                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1252                                 rx_ring->sbq_prod_idx_db_reg);
1253         }
1254 }
1255
1256 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1257                                     struct rx_ring *rx_ring)
1258 {
1259         ql_update_sbq(qdev, rx_ring);
1260         ql_update_lbq(qdev, rx_ring);
1261 }
1262
1263 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1264  * fails at some stage, or from the interrupt when a tx completes.
1265  */
1266 static void ql_unmap_send(struct ql_adapter *qdev,
1267                           struct tx_ring_desc *tx_ring_desc, int mapped)
1268 {
1269         int i;
1270         for (i = 0; i < mapped; i++) {
1271                 if (i == 0 || (i == 7 && mapped > 7)) {
1272                         /*
1273                          * Unmap the skb->data area, or the
1274                          * external sglist (AKA the Outbound
1275                          * Address List (OAL)).
1276                          * If it's the zeroeth element, then it's
1277                          * the skb->data area.  If it's the 7th
1278                          * element and there are more than 6 frags,
1279                          * then it's an OAL.
1280                          */
1281                         if (i == 7) {
1282                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1283                                              qdev->ndev,
1284                                              "unmapping OAL area.\n");
1285                         }
1286                         pci_unmap_single(qdev->pdev,
1287                                          dma_unmap_addr(&tx_ring_desc->map[i],
1288                                                         mapaddr),
1289                                          dma_unmap_len(&tx_ring_desc->map[i],
1290                                                        maplen),
1291                                          PCI_DMA_TODEVICE);
1292                 } else {
1293                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1294                                      "unmapping frag %d.\n", i);
1295                         pci_unmap_page(qdev->pdev,
1296                                        dma_unmap_addr(&tx_ring_desc->map[i],
1297                                                       mapaddr),
1298                                        dma_unmap_len(&tx_ring_desc->map[i],
1299                                                      maplen), PCI_DMA_TODEVICE);
1300                 }
1301         }
1302
1303 }
1304
1305 /* Map the buffers for this transmit.  This will return
1306  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1307  */
1308 static int ql_map_send(struct ql_adapter *qdev,
1309                        struct ob_mac_iocb_req *mac_iocb_ptr,
1310                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1311 {
1312         int len = skb_headlen(skb);
1313         dma_addr_t map;
1314         int frag_idx, err, map_idx = 0;
1315         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1316         int frag_cnt = skb_shinfo(skb)->nr_frags;
1317
1318         if (frag_cnt) {
1319                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1320                              "frag_cnt = %d.\n", frag_cnt);
1321         }
1322         /*
1323          * Map the skb buffer first.
1324          */
1325         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1326
1327         err = pci_dma_mapping_error(qdev->pdev, map);
1328         if (err) {
1329                 netif_err(qdev, tx_queued, qdev->ndev,
1330                           "PCI mapping failed with error: %d\n", err);
1331
1332                 return NETDEV_TX_BUSY;
1333         }
1334
1335         tbd->len = cpu_to_le32(len);
1336         tbd->addr = cpu_to_le64(map);
1337         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1338         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1339         map_idx++;
1340
1341         /*
1342          * This loop fills the remainder of the 8 address descriptors
1343          * in the IOCB.  If there are more than 7 fragments, then the
1344          * eighth address desc will point to an external list (OAL).
1345          * When this happens, the remainder of the frags will be stored
1346          * in this list.
1347          */
1348         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1349                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1350                 tbd++;
1351                 if (frag_idx == 6 && frag_cnt > 7) {
1352                         /* Let's tack on an sglist.
1353                          * Our control block will now
1354                          * look like this:
1355                          * iocb->seg[0] = skb->data
1356                          * iocb->seg[1] = frag[0]
1357                          * iocb->seg[2] = frag[1]
1358                          * iocb->seg[3] = frag[2]
1359                          * iocb->seg[4] = frag[3]
1360                          * iocb->seg[5] = frag[4]
1361                          * iocb->seg[6] = frag[5]
1362                          * iocb->seg[7] = ptr to OAL (external sglist)
1363                          * oal->seg[0] = frag[6]
1364                          * oal->seg[1] = frag[7]
1365                          * oal->seg[2] = frag[8]
1366                          * oal->seg[3] = frag[9]
1367                          * oal->seg[4] = frag[10]
1368                          *      etc...
1369                          */
1370                         /* Tack on the OAL in the eighth segment of IOCB. */
1371                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1372                                              sizeof(struct oal),
1373                                              PCI_DMA_TODEVICE);
1374                         err = pci_dma_mapping_error(qdev->pdev, map);
1375                         if (err) {
1376                                 netif_err(qdev, tx_queued, qdev->ndev,
1377                                           "PCI mapping outbound address list with error: %d\n",
1378                                           err);
1379                                 goto map_error;
1380                         }
1381
1382                         tbd->addr = cpu_to_le64(map);
1383                         /*
1384                          * The length is the number of fragments
1385                          * The length is the number of fragments
1386                          * that remain to be mapped times the size
1387                          * of a single OAL descriptor entry.
1387                          */
1388                         tbd->len =
1389                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1390                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1391                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1392                                            map);
1393                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1394                                           sizeof(struct oal));
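                        /* Redirect tbd into the OAL so the remaining
                         * frags are written to the external list rather
                         * than the IOCB itself.
                         */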
1395                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1396                         map_idx++;
1397                 }
1398
1399                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1400                                        DMA_TO_DEVICE);
1401
1402                 err = dma_mapping_error(&qdev->pdev->dev, map);
1403                 if (err) {
1404                         netif_err(qdev, tx_queued, qdev->ndev,
1405                                   "PCI mapping frags failed with error: %d.\n",
1406                                   err);
1407                         goto map_error;
1408                 }
1409
1410                 tbd->addr = cpu_to_le64(map);
1411                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1412                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1413                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1414                                   skb_frag_size(frag));
1415
1416         }
1417         /* Save the number of segments we've mapped. */
1418         tx_ring_desc->map_cnt = map_idx;
1419         /* Terminate the last segment. */
1420         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1421         return NETDEV_TX_OK;
1422
1423 map_error:
1424         /*
1425          * map_idx counts the segments mapped so far: skb->data plus
1426          * any fragments (and the OAL, if used) that were mapped
1427          * successfully before the failure, so passing it to
1428          * ql_unmap_send() unmaps exactly those areas.
1429          */
1430         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1431         return NETDEV_TX_BUSY;
1432 }
1433
1434 /* Categorizing receive firmware frame errors */
1435 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1436 {
1437         struct nic_stats *stats = &qdev->nic_stats;
1438
1439         stats->rx_err_count++;
1440
1441         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1442         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1443                 stats->rx_code_err++;
1444                 break;
1445         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1446                 stats->rx_oversize_err++;
1447                 break;
1448         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1449                 stats->rx_undersize_err++;
1450                 break;
1451         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1452                 stats->rx_preamble_err++;
1453                 break;
1454         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1455                 stats->rx_frame_len_err++;
1456                 break;
1457         case IB_MAC_IOCB_RSP_ERR_CRC:
1458                 stats->rx_crc_err++;
1459         default:
1460                 break;
1461         }
1462 }
1463
1464 /* Process an inbound completion from an rx ring. */
1465 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1466                                         struct rx_ring *rx_ring,
1467                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1468                                         u32 length,
1469                                         u16 vlan_id)
1470 {
1471         struct sk_buff *skb;
1472         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1473         struct napi_struct *napi = &rx_ring->napi;
1474
1475         napi->dev = qdev->ndev;
1476
1477         skb = napi_get_frags(napi);
1478         if (!skb) {
1479                 netif_err(qdev, drv, qdev->ndev,
1480                           "Couldn't get an skb, exiting.\n");
1481                 rx_ring->rx_dropped++;
1482                 put_page(lbq_desc->p.pg_chunk.page);
1483                 return;
1484         }
1485         prefetch(lbq_desc->p.pg_chunk.va);
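        /* Attach the page chunk as a fragment and update the skb length
         * accounting so GRO sees the complete frame.
         */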
1486         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1487                              lbq_desc->p.pg_chunk.page,
1488                              lbq_desc->p.pg_chunk.offset,
1489                              length);
1490
1491         skb->len += length;
1492         skb->data_len += length;
1493         skb->truesize += length;
1494         skb_shinfo(skb)->nr_frags++;
1495
1496         rx_ring->rx_packets++;
1497         rx_ring->rx_bytes += length;
1498         skb->ip_summed = CHECKSUM_UNNECESSARY;
1499         skb_record_rx_queue(skb, rx_ring->cq_id);
1500         if (vlan_id != 0xffff)
1501                 __vlan_hwaccel_put_tag(skb, vlan_id);
1502         napi_gro_frags(napi);
1503 }
1504
1505 /* Process an inbound completion from an rx ring. */
1506 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1507                                         struct rx_ring *rx_ring,
1508                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1509                                         u32 length,
1510                                         u16 vlan_id)
1511 {
1512         struct net_device *ndev = qdev->ndev;
1513         struct sk_buff *skb = NULL;
1514         void *addr;
1515         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1516         struct napi_struct *napi = &rx_ring->napi;
1517
1518         skb = netdev_alloc_skb(ndev, length);
1519         if (!skb) {
1520                 rx_ring->rx_dropped++;
1521                 put_page(lbq_desc->p.pg_chunk.page);
1522                 return;
1523         }
1524
1525         addr = lbq_desc->p.pg_chunk.va;
1526         prefetch(addr);
1527
1528         /* The max framesize filter on this chip is set higher than
1529          * MTU since FCoE uses 2k frames.
1530          */
1531         if (length > ndev->mtu + ETH_HLEN) {
1532                 netif_err(qdev, drv, qdev->ndev,
1533                           "Frame too long for MTU, dropping.\n");
1534                 rx_ring->rx_dropped++;
1535                 goto err_out;
1536         }
1537         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1538         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1539                      "%d bytes of headers and data in large. Chain page to new skb.\n",
1540                      length);
1541         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1542                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1543                                 length-ETH_HLEN);
1544         skb->len += length-ETH_HLEN;
1545         skb->data_len += length-ETH_HLEN;
1546         skb->truesize += length-ETH_HLEN;
1547
1548         rx_ring->rx_packets++;
1549         rx_ring->rx_bytes += skb->len;
1550         skb->protocol = eth_type_trans(skb, ndev);
1551         skb_checksum_none_assert(skb);
1552
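        /* If rx checksum is on, and there are no
         * csum or frame errors.
         */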
1553         if ((ndev->features & NETIF_F_RXCSUM) &&
1554                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1555                 /* TCP frame. */
1556                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1557                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1558                                      "TCP checksum done!\n");
1559                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1560                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1561                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1562                         /* Unfragmented ipv4 UDP frame. */
1563                         struct iphdr *iph =
1564                                 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1565                         if (!(iph->frag_off &
1566                                 htons(IP_MF|IP_OFFSET))) {
1567                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1568                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1569                                              qdev->ndev,
1570                                              "UDP checksum done!\n");
1571                         }
1572                 }
1573         }
1574
1575         skb_record_rx_queue(skb, rx_ring->cq_id);
1576         if (vlan_id != 0xffff)
1577                 __vlan_hwaccel_put_tag(skb, vlan_id);
1578         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1579                 napi_gro_receive(napi, skb);
1580         else
1581                 netif_receive_skb(skb);
1582         return;
1583 err_out:
1584         dev_kfree_skb_any(skb);
1585         put_page(lbq_desc->p.pg_chunk.page);
1586 }
1587
1588 /* Process an inbound completion from an rx ring. */
1589 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1590                                         struct rx_ring *rx_ring,
1591                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1592                                         u32 length,
1593                                         u16 vlan_id)
1594 {
1595         struct net_device *ndev = qdev->ndev;
1596         struct sk_buff *skb = NULL;
1597         struct sk_buff *new_skb = NULL;
1598         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1599
1600         skb = sbq_desc->p.skb;
1601         /* Allocate new_skb and copy */
1602         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1603         if (new_skb == NULL) {
1604                 rx_ring->rx_dropped++;
1605                 return;
1606         }
1607         skb_reserve(new_skb, NET_IP_ALIGN);
1608         memcpy(skb_put(new_skb, length), skb->data, length);
1609         skb = new_skb;
1610
1611         /* loopback self test for ethtool */
1612         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1613                 ql_check_lb_frame(qdev, skb);
1614                 dev_kfree_skb_any(skb);
1615                 return;
1616         }
1617
1618         /* The max framesize filter on this chip is set higher than
1619          * MTU since FCoE uses 2k frames.
1620          */
1621         if (skb->len > ndev->mtu + ETH_HLEN) {
1622                 dev_kfree_skb_any(skb);
1623                 rx_ring->rx_dropped++;
1624                 return;
1625         }
1626
1627         prefetch(skb->data);
1628         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1629                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1630                              "%s Multicast.\n",
1631                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1632                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1633                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1634                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1635                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1636                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1637         }
1638         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1639                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1640                              "Promiscuous Packet.\n");
1641
1642         rx_ring->rx_packets++;
1643         rx_ring->rx_bytes += skb->len;
1644         skb->protocol = eth_type_trans(skb, ndev);
1645         skb_checksum_none_assert(skb);
1646
1647         /* If rx checksum is on, and there are no
1648          * csum or frame errors.
1649          */
1650         if ((ndev->features & NETIF_F_RXCSUM) &&
1651                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1652                 /* TCP frame. */
1653                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1654                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1655                                      "TCP checksum done!\n");
1656                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1657                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1658                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1659                         /* Unfragmented ipv4 UDP frame. */
1660                         struct iphdr *iph = (struct iphdr *) skb->data;
1661                         if (!(iph->frag_off &
1662                                 htons(IP_MF|IP_OFFSET))) {
1663                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1664                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1665                                              qdev->ndev,
1666                                              "UDP checksum done!\n");
1667                         }
1668                 }
1669         }
1670
1671         skb_record_rx_queue(skb, rx_ring->cq_id);
1672         if (vlan_id != 0xffff)
1673                 __vlan_hwaccel_put_tag(skb, vlan_id);
1674         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1675                 napi_gro_receive(&rx_ring->napi, skb);
1676         else
1677                 netif_receive_skb(skb);
1678 }
1679
1680 static void ql_realign_skb(struct sk_buff *skb, int len)
1681 {
1682         void *temp_addr = skb->data;
1683
1684         /* Undo the skb_reserve(skb,32) we did before
1685          * giving to hardware, and realign data on
1686          * a 2-byte boundary.
1687          */
1688         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1689         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1690         skb_copy_to_linear_data(skb, temp_addr,
1691                 (unsigned int)len);
1692 }
1693
1694 /*
1695  * This function builds an skb for the given inbound
1696  * completion.  It will be rewritten for readability in the near
1697  * future, but for now it works well.
1698  */
1699 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1700                                        struct rx_ring *rx_ring,
1701                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1702 {
1703         struct bq_desc *lbq_desc;
1704         struct bq_desc *sbq_desc;
1705         struct sk_buff *skb = NULL;
1706         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1707         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1708
1709         /*
1710          * Handle the header buffer if present.
1711          */
1712         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1713             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1714                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715                              "Header of %d bytes in small buffer.\n", hdr_len);
1716                 /*
1717                  * Headers fit nicely into a small buffer.
1718                  */
1719                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1720                 pci_unmap_single(qdev->pdev,
1721                                 dma_unmap_addr(sbq_desc, mapaddr),
1722                                 dma_unmap_len(sbq_desc, maplen),
1723                                 PCI_DMA_FROMDEVICE);
1724                 skb = sbq_desc->p.skb;
1725                 ql_realign_skb(skb, hdr_len);
1726                 skb_put(skb, hdr_len);
1727                 sbq_desc->p.skb = NULL;
1728         }
1729
1730         /*
1731          * Handle the data buffer(s).
1732          */
1733         if (unlikely(!length)) {        /* Is there data too? */
1734                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1735                              "No Data buffer in this packet.\n");
1736                 return skb;
1737         }
1738
1739         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1740                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742                                      "Headers in small, data of %d bytes in small, combine them.\n",
1743                                      length);
1744                         /*
1745                          * Data is less than small buffer size so it's
1746                          * stuffed in a small buffer.
1747                          * For this case we append the data
1748                          * from the "data" small buffer to the "header" small
1749                          * buffer.
1750                          */
1751                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1752                         pci_dma_sync_single_for_cpu(qdev->pdev,
1753                                                     dma_unmap_addr
1754                                                     (sbq_desc, mapaddr),
1755                                                     dma_unmap_len
1756                                                     (sbq_desc, maplen),
1757                                                     PCI_DMA_FROMDEVICE);
1758                         memcpy(skb_put(skb, length),
1759                                sbq_desc->p.skb->data, length);
1760                         pci_dma_sync_single_for_device(qdev->pdev,
1761                                                        dma_unmap_addr
1762                                                        (sbq_desc,
1763                                                         mapaddr),
1764                                                        dma_unmap_len
1765                                                        (sbq_desc,
1766                                                         maplen),
1767                                                        PCI_DMA_FROMDEVICE);
1768                 } else {
1769                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1770                                      "%d bytes in a single small buffer.\n",
1771                                      length);
1772                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1773                         skb = sbq_desc->p.skb;
1774                         ql_realign_skb(skb, length);
1775                         skb_put(skb, length);
1776                         pci_unmap_single(qdev->pdev,
1777                                          dma_unmap_addr(sbq_desc,
1778                                                         mapaddr),
1779                                          dma_unmap_len(sbq_desc,
1780                                                        maplen),
1781                                          PCI_DMA_FROMDEVICE);
1782                         sbq_desc->p.skb = NULL;
1783                 }
1784         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1785                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1786                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1787                                      "Header in small, %d bytes in large. Chain large to small!\n",
1788                                      length);
1789                         /*
1790                          * The data is in a single large buffer.  We
1791                          * chain it to the header buffer's skb and let
1792                          * it rip.
1793                          */
1794                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1795                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1796                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1797                                      lbq_desc->p.pg_chunk.offset, length);
1798                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1799                                                 lbq_desc->p.pg_chunk.offset,
1800                                                 length);
1801                         skb->len += length;
1802                         skb->data_len += length;
1803                         skb->truesize += length;
1804                 } else {
1805                         /*
1806                          * The headers and data are in a single large buffer. We
1807                          * copy it to a new skb and let it go. This can happen with
1808                          * jumbo mtu on a non-TCP/UDP frame.
1809                          */
1810                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1811                         skb = netdev_alloc_skb(qdev->ndev, length);
1812                         if (skb == NULL) {
1813                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1814                                              "No skb available, drop the packet.\n");
1815                                 return NULL;
1816                         }
1817                         pci_unmap_page(qdev->pdev,
1818                                        dma_unmap_addr(lbq_desc,
1819                                                       mapaddr),
1820                                        dma_unmap_len(lbq_desc, maplen),
1821                                        PCI_DMA_FROMDEVICE);
1822                         skb_reserve(skb, NET_IP_ALIGN);
1823                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1824                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1825                                      length);
1826                         skb_fill_page_desc(skb, 0,
1827                                                 lbq_desc->p.pg_chunk.page,
1828                                                 lbq_desc->p.pg_chunk.offset,
1829                                                 length);
1830                         skb->len += length;
1831                         skb->data_len += length;
1832                         skb->truesize += length;
1833                         length -= length;
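                        /* Pull the Ethernet (and VLAN) header into the
                         * linear area so eth_type_trans() can parse it.
                         */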
1834                         __pskb_pull_tail(skb,
1835                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1836                                 VLAN_ETH_HLEN : ETH_HLEN);
1837                 }
1838         } else {
1839                 /*
1840                  * The data is in a chain of large buffers
1841                  * pointed to by a small buffer.  We loop
1842                  * through and chain them to our small header
1843                  * buffer's skb.
1844                  * frags:  There are 18 max frags and our small
1845                  *         buffer will hold 32 of them. The thing is,
1846                  *         we'll use 3 max for our 9000 byte jumbo
1847                  *         frames.  If the MTU goes up we could
1848                  *          eventually be in trouble.
1849                  */
1850                 int size, i = 0;
1851                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1852                 pci_unmap_single(qdev->pdev,
1853                                  dma_unmap_addr(sbq_desc, mapaddr),
1854                                  dma_unmap_len(sbq_desc, maplen),
1855                                  PCI_DMA_FROMDEVICE);
1856                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1857                         /*
1858                          * This is a non-TCP/UDP IP frame, so
1859                          * the headers aren't split into a small
1860                          * buffer.  We have to use the small buffer
1861                          * that contains our sg list as our skb to
1862                          * send up the stack. Copy the sg list here to
1863                          * a local buffer and use it to find the
1864                          * pages to chain.
1865                          */
1866                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867                                      "%d bytes of headers & data in chain of large.\n",
1868                                      length);
1869                         skb = sbq_desc->p.skb;
1870                         sbq_desc->p.skb = NULL;
1871                         skb_reserve(skb, NET_IP_ALIGN);
1872                 }
1873                 while (length > 0) {
1874                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1875                         size = (length < rx_ring->lbq_buf_size) ? length :
1876                                 rx_ring->lbq_buf_size;
1877
1878                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1879                                      "Adding page %d to skb for %d bytes.\n",
1880                                      i, size);
1881                         skb_fill_page_desc(skb, i,
1882                                                 lbq_desc->p.pg_chunk.page,
1883                                                 lbq_desc->p.pg_chunk.offset,
1884                                                 size);
1885                         skb->len += size;
1886                         skb->data_len += size;
1887                         skb->truesize += size;
1888                         length -= size;
1889                         i++;
1890                 }
1891                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1892                                 VLAN_ETH_HLEN : ETH_HLEN);
1893         }
1894         return skb;
1895 }
1896
1897 /* Process an inbound completion from an rx ring. */
1898 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1899                                    struct rx_ring *rx_ring,
1900                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1901                                    u16 vlan_id)
1902 {
1903         struct net_device *ndev = qdev->ndev;
1904         struct sk_buff *skb = NULL;
1905
1906         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1907
1908         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1909         if (unlikely(!skb)) {
1910                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1911                              "No skb available, drop packet.\n");
1912                 rx_ring->rx_dropped++;
1913                 return;
1914         }
1915
1916         /* The max framesize filter on this chip is set higher than
1917          * MTU since FCoE uses 2k frames.
1918          */
1919         if (skb->len > ndev->mtu + ETH_HLEN) {
1920                 dev_kfree_skb_any(skb);
1921                 rx_ring->rx_dropped++;
1922                 return;
1923         }
1924
1925         /* loopback self test for ethtool */
1926         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1927                 ql_check_lb_frame(qdev, skb);
1928                 dev_kfree_skb_any(skb);
1929                 return;
1930         }
1931
1932         prefetch(skb->data);
1933         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1934                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1935                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1936                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1937                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1938                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1939                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1940                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1941                 rx_ring->rx_multicast++;
1942         }
1943         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1944                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1945                              "Promiscuous Packet.\n");
1946         }
1947
1948         skb->protocol = eth_type_trans(skb, ndev);
1949         skb_checksum_none_assert(skb);
1950
1951         /* If rx checksum is on, and there are no
1952          * csum or frame errors.
1953          */
1954         if ((ndev->features & NETIF_F_RXCSUM) &&
1955                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1956                 /* TCP frame. */
1957                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1958                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1959                                      "TCP checksum done!\n");
1960                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1961                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1962                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1963                 /* Unfragmented ipv4 UDP frame. */
1964                         struct iphdr *iph = (struct iphdr *) skb->data;
1965                         if (!(iph->frag_off &
1966                                 htons(IP_MF|IP_OFFSET))) {
1967                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1968                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1969                                              "UDP checksum done!\n");
1970                         }
1971                 }
1972         }
1973
1974         rx_ring->rx_packets++;
1975         rx_ring->rx_bytes += skb->len;
1976         skb_record_rx_queue(skb, rx_ring->cq_id);
1977         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1978                 __vlan_hwaccel_put_tag(skb, vlan_id);
1979         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1980                 napi_gro_receive(&rx_ring->napi, skb);
1981         else
1982                 netif_receive_skb(skb);
1983 }
1984
1985 /* Process an inbound completion from an rx ring. */
1986 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1987                                         struct rx_ring *rx_ring,
1988                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
1989 {
1990         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
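        /* Extract the VLAN id if the frame was tagged; 0xffff marks an
         * untagged frame for the callees below.
         */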
1991         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1992                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1993                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1994
1995         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1996
1997         /* Frame error, so drop the packet. */
1998         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1999                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2000                 return (unsigned long)length;
2001         }
2002
2003         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2004                 /* The data and headers are split into
2005                  * separate buffers.
2006                  */
2007                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2008                                                 vlan_id);
2009         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2010                 /* The data fit in a single small buffer.
2011                  * Allocate a new skb, copy the data and
2012                  * return the buffer to the free pool.
2013                  */
2014                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2015                                                 length, vlan_id);
2016         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2017                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2018                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2019                 /* TCP packet in a page chunk that's been checksummed.
2020                  * Tack it on to our GRO skb and let it go.
2021                  */
2022                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2023                                                 length, vlan_id);
2024         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2025                 /* Non-TCP packet in a page chunk. Allocate an
2026                  * skb, tack it on frags, and send it up.
2027                  */
2028                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2029                                                 length, vlan_id);
2030         } else {
2031                 /* Non-TCP/UDP large frames that span multiple buffers
2032                  * can be processed correctly by the split frame logic.
2033                  */
2034                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2035                                                 vlan_id);
2036         }
2037
2038         return (unsigned long)length;
2039 }
2040
2041 /* Process an outbound completion from an rx ring. */
2042 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2043                                    struct ob_mac_iocb_rsp *mac_rsp)
2044 {
2045         struct tx_ring *tx_ring;
2046         struct tx_ring_desc *tx_ring_desc;
2047
2048         QL_DUMP_OB_MAC_RSP(mac_rsp);
2049         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2050         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
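        /* Release the DMA mappings for this send before freeing the skb
         * and returning the slot to the tx ring.
         */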
2051         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2052         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2053         tx_ring->tx_packets++;
2054         dev_kfree_skb(tx_ring_desc->skb);
2055         tx_ring_desc->skb = NULL;
2056
2057         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2058                                         OB_MAC_IOCB_RSP_S |
2059                                         OB_MAC_IOCB_RSP_L |
2060                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2061                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2062                         netif_warn(qdev, tx_done, qdev->ndev,
2063                                    "Total descriptor length did not match transfer length.\n");
2064                 }
2065                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2066                         netif_warn(qdev, tx_done, qdev->ndev,
2067                                    "Frame too short to be valid, not sent.\n");
2068                 }
2069                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2070                         netif_warn(qdev, tx_done, qdev->ndev,
2071                                    "Frame too long, but sent anyway.\n");
2072                 }
2073                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2074                         netif_warn(qdev, tx_done, qdev->ndev,
2075                                    "PCI backplane error. Frame not sent.\n");
2076                 }
2077         }
2078         atomic_inc(&tx_ring->tx_count);
2079 }
2080
2081 /* Fire up a handler to reset the MPI processor. */
2082 void ql_queue_fw_error(struct ql_adapter *qdev)
2083 {
2084         ql_link_off(qdev);
2085         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2086 }
2087
2088 void ql_queue_asic_error(struct ql_adapter *qdev)
2089 {
2090         ql_link_off(qdev);
2091         ql_disable_interrupts(qdev);
2092         /* Clear adapter up bit to signal the recovery
2093          * process that it shouldn't kill the reset worker
2094          * thread
2095          */
2096         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2097         /* Set the asic recovery bit to tell the reset process that we
2098          * are in fatal error recovery rather than a normal close
2099          */
2100         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2101         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2102 }
2103
2104 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2105                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2106 {
2107         switch (ib_ae_rsp->event) {
2108         case MGMT_ERR_EVENT:
2109                 netif_err(qdev, rx_err, qdev->ndev,
2110                           "Management Processor Fatal Error.\n");
2111                 ql_queue_fw_error(qdev);
2112                 return;
2113
2114         case CAM_LOOKUP_ERR_EVENT:
2115                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2116                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2117                 ql_queue_asic_error(qdev);
2118                 return;
2119
2120         case SOFT_ECC_ERROR_EVENT:
2121                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2122                 ql_queue_asic_error(qdev);
2123                 break;
2124
2125         case PCI_ERR_ANON_BUF_RD:
2126                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2127                                         "anonymous buffers from rx_ring %d.\n",
2128                                         ib_ae_rsp->q_id);
2129                 ql_queue_asic_error(qdev);
2130                 break;
2131
2132         default:
2133                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2134                           ib_ae_rsp->event);
2135                 ql_queue_asic_error(qdev);
2136                 break;
2137         }
2138 }
2139
2140 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2141 {
2142         struct ql_adapter *qdev = rx_ring->qdev;
2143         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2144         struct ob_mac_iocb_rsp *net_rsp = NULL;
2145         int count = 0;
2146
2147         struct tx_ring *tx_ring;
2148         /* While there are entries in the completion queue. */
2149         while (prod != rx_ring->cnsmr_idx) {
2150
2151                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2152                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2153                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2154
2155                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2156                 rmb();
2157                 switch (net_rsp->opcode) {
2158
2159                 case OPCODE_OB_MAC_TSO_IOCB:
2160                 case OPCODE_OB_MAC_IOCB:
2161                         ql_process_mac_tx_intr(qdev, net_rsp);
2162                         break;
2163                 default:
2164                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2165                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2166                                      net_rsp->opcode);
2167                 }
2168                 count++;
2169                 ql_update_cq(rx_ring);
2170                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2171         }
2172         if (!net_rsp)
2173                 return 0;
2174         ql_write_cq_idx(rx_ring);
2175         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2176         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2177                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2178                         /*
2179                          * The queue got stopped because the tx_ring was full.
2180                          * Wake it up, because it's now at least 25% empty.
2181                          */
2182                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2183         }
2184
2185         return count;
2186 }
2187
2188 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2189 {
2190         struct ql_adapter *qdev = rx_ring->qdev;
2191         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2192         struct ql_net_rsp_iocb *net_rsp;
2193         int count = 0;
2194
2195         /* While there are entries in the completion queue. */
2196         while (prod != rx_ring->cnsmr_idx) {
2197
2198                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2199                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2200                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2201
2202                 net_rsp = rx_ring->curr_entry;
2203                 rmb();
2204                 switch (net_rsp->opcode) {
2205                 case OPCODE_IB_MAC_IOCB:
2206                         ql_process_mac_rx_intr(qdev, rx_ring,
2207                                                (struct ib_mac_iocb_rsp *)
2208                                                net_rsp);
2209                         break;
2210
2211                 case OPCODE_IB_AE_IOCB:
2212                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2213                                                 net_rsp);
2214                         break;
2215                 default:
2216                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2217                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2218                                      net_rsp->opcode);
2219                         break;
2220                 }
2221                 count++;
2222                 ql_update_cq(rx_ring);
2223                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224                 if (count == budget)
2225                         break;
2226         }
2227         ql_update_buffer_queues(qdev, rx_ring);
2228         ql_write_cq_idx(rx_ring);
2229         return count;
2230 }
2231
2232 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2233 {
2234         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2235         struct ql_adapter *qdev = rx_ring->qdev;
2236         struct rx_ring *trx_ring;
2237         int i, work_done = 0;
2238         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2239
2240         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2241                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2242
2243         /* Service the TX rings first.  They start
2244          * right after the RSS rings. */
2245         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2246                 trx_ring = &qdev->rx_ring[i];
2247                 /* If this TX completion ring belongs to this vector and
2248                  * it's not empty then service it.
2249                  */
2250                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2251                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2252                                         trx_ring->cnsmr_idx)) {
2253                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2254                                      "%s: Servicing TX completion ring %d.\n",
2255                                      __func__, trx_ring->cq_id);
2256                         ql_clean_outbound_rx_ring(trx_ring);
2257                 }
2258         }
2259
2260         /*
2261          * Now service the RSS ring if it's active.
2262          */
2263         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2264                                         rx_ring->cnsmr_idx) {
2265                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2266                              "%s: Servicing RX completion ring %d.\n",
2267                              __func__, rx_ring->cq_id);
2268                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2269         }
2270
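        /* Only re-enable the interrupt once the ring has been drained
         * below the budget; otherwise NAPI will poll us again.
         */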
2271         if (work_done < budget) {
2272                 napi_complete(napi);
2273                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2274         }
2275         return work_done;
2276 }
2277
2278 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2279 {
2280         struct ql_adapter *qdev = netdev_priv(ndev);
2281
2282         if (features & NETIF_F_HW_VLAN_RX) {
2283                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2284                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2285         } else {
2286                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2287         }
2288 }
2289
2290 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2291         netdev_features_t features)
2292 {
2293         /*
2294          * Since there is no support for separate rx/tx vlan accel
2295          * enable/disable make sure tx flag is always in same state as rx.
2296          */
2297         if (features & NETIF_F_HW_VLAN_RX)
2298                 features |= NETIF_F_HW_VLAN_TX;
2299         else
2300                 features &= ~NETIF_F_HW_VLAN_TX;
2301
2302         return features;
2303 }
2304
2305 static int qlge_set_features(struct net_device *ndev,
2306         netdev_features_t features)
2307 {
2308         netdev_features_t changed = ndev->features ^ features;
2309
2310         if (changed & NETIF_F_HW_VLAN_RX)
2311                 qlge_vlan_mode(ndev, features);
2312
2313         return 0;
2314 }
2315
2316 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2317 {
2318         u32 enable_bit = MAC_ADDR_E;
2319         int err;
2320
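        /* Writing the VLAN entry with the enable bit set adds this vid
         * to the hardware filter; __qlge_vlan_rx_kill_vid() clears the
         * bit to remove it.
         */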
2321         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2322                                   MAC_ADDR_TYPE_VLAN, vid);
2323         if (err)
2324                 netif_err(qdev, ifup, qdev->ndev,
2325                           "Failed to init vlan address.\n");
2326         return err;
2327 }
2328
2329 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2330 {
2331         struct ql_adapter *qdev = netdev_priv(ndev);
2332         int status;
2333         int err;
2334
2335         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2336         if (status)
2337                 return status;
2338
2339         err = __qlge_vlan_rx_add_vid(qdev, vid);
2340         set_bit(vid, qdev->active_vlans);
2341
2342         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2343
2344         return err;
2345 }
2346
2347 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2348 {
2349         u32 enable_bit = 0;
2350         int err;
2351
2352         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2353                                   MAC_ADDR_TYPE_VLAN, vid);
2354         if (err)
2355                 netif_err(qdev, ifup, qdev->ndev,
2356                           "Failed to clear vlan address.\n");
2357         return err;
2358 }
2359
2360 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2361 {
2362         struct ql_adapter *qdev = netdev_priv(ndev);
2363         int status;
2364         int err;
2365
2366         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367         if (status)
2368                 return status;
2369
2370         err = __qlge_vlan_rx_kill_vid(qdev, vid);
2371         clear_bit(vid, qdev->active_vlans);
2372
2373         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2374
2375         return err;
2376 }
2377
2378 static void qlge_restore_vlan(struct ql_adapter *qdev)
2379 {
2380         int status;
2381         u16 vid;
2382
2383         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2384         if (status)
2385                 return;
2386
2387         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2388                 __qlge_vlan_rx_add_vid(qdev, vid);
2389
2390         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2391 }
2392
2393 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2394 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2395 {
2396         struct rx_ring *rx_ring = dev_id;
2397         napi_schedule(&rx_ring->napi);
2398         return IRQ_HANDLED;
2399 }
2400
2401 /* This handles a fatal error, MPI activity, and the default
2402  * rx_ring in an MSI-X multiple vector environment.
2403  * In an MSI/Legacy environment it also processes the rest of
2404  * the rx_rings.
2405  */
2406 static irqreturn_t qlge_isr(int irq, void *dev_id)
2407 {
2408         struct rx_ring *rx_ring = dev_id;
2409         struct ql_adapter *qdev = rx_ring->qdev;
2410         struct intr_context *intr_context = &qdev->intr_context[0];
2411         u32 var;
2412         int work_done = 0;
2413
2414         spin_lock(&qdev->hw_lock);
2415         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2416                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2417                              "Shared Interrupt, Not ours!\n");
2418                 spin_unlock(&qdev->hw_lock);
2419                 return IRQ_NONE;
2420         }
2421         spin_unlock(&qdev->hw_lock);
2422
2423         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2424
2425         /*
2426          * Check for fatal error.
2427          */
2428         if (var & STS_FE) {
2429                 ql_queue_asic_error(qdev);
2430                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2431                 var = ql_read32(qdev, ERR_STS);
2432                 netdev_err(qdev->ndev, "Resetting chip. "
2433                                         "Error Status Register = 0x%x\n", var);
2434                 return IRQ_HANDLED;
2435         }
2436
2437         /*
2438          * Check MPI processor activity.
2439          */
2440         if ((var & STS_PI) &&
2441                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2442                 /*
2443                  * We've got an async event or mailbox completion.
2444                  * Handle it and clear the source of the interrupt.
2445                  */
2446                 netif_err(qdev, intr, qdev->ndev,
2447                           "Got MPI processor interrupt.\n");
2448                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2449                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2450                 queue_delayed_work_on(smp_processor_id(),
2451                                 qdev->workqueue, &qdev->mpi_work, 0);
2452                 work_done++;
2453         }
2454
2455         /*
2456          * Get the bit-mask that shows the active queues for this
2457          * pass.  Compare it to the queues that this irq services
2458          * and call napi if there's a match.
2459          */
2460         var = ql_read32(qdev, ISR1);
2461         if (var & intr_context->irq_mask) {
2462                 netif_info(qdev, intr, qdev->ndev,
2463                            "Waking handler for rx_ring[0].\n");
2464                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2465                 napi_schedule(&rx_ring->napi);
2466                 work_done++;
2467         }
2468         ql_enable_completion_interrupt(qdev, intr_context->intr);
2469         return work_done ? IRQ_HANDLED : IRQ_NONE;
2470 }
2471
2472 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2473 {
2474
2475         if (skb_is_gso(skb)) {
2476                 int err;
2477                 if (skb_header_cloned(skb)) {
2478                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479                         if (err)
2480                                 return err;
2481                 }
2482
2483                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2484                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2485                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2486                 mac_iocb_ptr->total_hdrs_len =
2487                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2488                 mac_iocb_ptr->net_trans_offset =
2489                     cpu_to_le16(skb_network_offset(skb) |
2490                                 skb_transport_offset(skb)
2491                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2492                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2493                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
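                /* Seed the TCP checksum with the pseudo-header sum so the
                 * hardware can complete it for each generated segment.
                 */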
2494                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2495                         struct iphdr *iph = ip_hdr(skb);
2496                         iph->check = 0;
2497                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2498                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2499                                                                  iph->daddr, 0,
2500                                                                  IPPROTO_TCP,
2501                                                                  0);
2502                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2503                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2504                         tcp_hdr(skb)->check =
2505                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2506                                              &ipv6_hdr(skb)->daddr,
2507                                              0, IPPROTO_TCP, 0);
2508                 }
2509                 return 1;
2510         }
2511         return 0;
2512 }
2513
2514 static void ql_hw_csum_setup(struct sk_buff *skb,
2515                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2516 {
2517         int len;
2518         struct iphdr *iph = ip_hdr(skb);
2519         __sum16 *check;
2520         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2521         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2522         mac_iocb_ptr->net_trans_offset =
2523                 cpu_to_le16(skb_network_offset(skb) |
2524                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2525
2526         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2527         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2528         if (likely(iph->protocol == IPPROTO_TCP)) {
2529                 check = &(tcp_hdr(skb)->check);
2530                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2531                 mac_iocb_ptr->total_hdrs_len =
2532                     cpu_to_le16(skb_transport_offset(skb) +
2533                                 (tcp_hdr(skb)->doff << 2));
2534         } else {
2535                 check = &(udp_hdr(skb)->check);
2536                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2537                 mac_iocb_ptr->total_hdrs_len =
2538                     cpu_to_le16(skb_transport_offset(skb) +
2539                                 sizeof(struct udphdr));
2540         }
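        /* Seed *check with the pseudo-header sum; the hardware inserts
         * the final TCP/UDP checksum on transmit.
         */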
2541         *check = ~csum_tcpudp_magic(iph->saddr,
2542                                     iph->daddr, len, iph->protocol, 0);
2543 }
2544
2545 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2546 {
2547         struct tx_ring_desc *tx_ring_desc;
2548         struct ob_mac_iocb_req *mac_iocb_ptr;
2549         struct ql_adapter *qdev = netdev_priv(ndev);
2550         int tso;
2551         struct tx_ring *tx_ring;
2552         u32 tx_ring_idx = (u32) skb->queue_mapping;
2553
2554         tx_ring = &qdev->tx_ring[tx_ring_idx];
2555
2556         if (skb_padto(skb, ETH_ZLEN))
2557                 return NETDEV_TX_OK;
2558
2559         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2560                 netif_info(qdev, tx_queued, qdev->ndev,
2561                            "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2562                            __func__, tx_ring_idx);
2563                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2564                 tx_ring->tx_errors++;
2565                 return NETDEV_TX_BUSY;
2566         }
2567         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2568         mac_iocb_ptr = tx_ring_desc->queue_entry;
2569         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2570
2571         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2572         mac_iocb_ptr->tid = tx_ring_desc->index;
2573         /* We use the upper 32-bits to store the tx queue for this IO.
2574          * When we get the completion we can use it to establish the context.
2575          */
2576         mac_iocb_ptr->txq_idx = tx_ring_idx;
2577         tx_ring_desc->skb = skb;
2578
2579         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2580
2581         if (vlan_tx_tag_present(skb)) {
2582                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2583                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2584                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2585                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2586         }
2587         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2588         if (tso < 0) {
2589                 dev_kfree_skb_any(skb);
2590                 return NETDEV_TX_OK;
2591         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2592                 ql_hw_csum_setup(skb,
2593                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2594         }
2595         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2596                         NETDEV_TX_OK) {
2597                 netif_err(qdev, tx_queued, qdev->ndev,
2598                           "Could not map the segments.\n");
2599                 tx_ring->tx_errors++;
2600                 return NETDEV_TX_BUSY;
2601         }
2602         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2603         tx_ring->prod_idx++;
2604         if (tx_ring->prod_idx == tx_ring->wq_len)
2605                 tx_ring->prod_idx = 0;
2606         wmb();
2607
2608         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2609         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2610                      "tx queued, slot %d, len %d\n",
2611                      tx_ring->prod_idx, skb->len);
2612
2613         atomic_dec(&tx_ring->tx_count);
2614
2615         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2616                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2617                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2618                         /*
2619                          * The queue got stopped because the tx_ring was full.
2620                          * Wake it up, because it's now at least 25% empty.
2621                          */
2622                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2623         }
2624         return NETDEV_TX_OK;
2625 }
2626
2627
2628 static void ql_free_shadow_space(struct ql_adapter *qdev)
2629 {
2630         if (qdev->rx_ring_shadow_reg_area) {
2631                 pci_free_consistent(qdev->pdev,
2632                                     PAGE_SIZE,
2633                                     qdev->rx_ring_shadow_reg_area,
2634                                     qdev->rx_ring_shadow_reg_dma);
2635                 qdev->rx_ring_shadow_reg_area = NULL;
2636         }
2637         if (qdev->tx_ring_shadow_reg_area) {
2638                 pci_free_consistent(qdev->pdev,
2639                                     PAGE_SIZE,
2640                                     qdev->tx_ring_shadow_reg_area,
2641                                     qdev->tx_ring_shadow_reg_dma);
2642                 qdev->tx_ring_shadow_reg_area = NULL;
2643         }
2644 }
2645
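/* Allocate one page of coherent DMA memory for the rx shadow area
 * (completion-queue producer-index shadows plus the lbq/sbq indirect
 * page lists) and one page for the tx consumer-index shadows.  The
 * per-ring layout is carved out of these pages later, in
 * ql_start_rx_ring() and ql_start_tx_ring().
 */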
2646 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2647 {
2648         qdev->rx_ring_shadow_reg_area =
2649             pci_alloc_consistent(qdev->pdev,
2650                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2651         if (qdev->rx_ring_shadow_reg_area == NULL) {
2652                 netif_err(qdev, ifup, qdev->ndev,
2653                           "Allocation of RX shadow space failed.\n");
2654                 return -ENOMEM;
2655         }
2656         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2657         qdev->tx_ring_shadow_reg_area =
2658             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2659                                  &qdev->tx_ring_shadow_reg_dma);
2660         if (qdev->tx_ring_shadow_reg_area == NULL) {
2661                 netif_err(qdev, ifup, qdev->ndev,
2662                           "Allocation of TX shadow space failed.\n");
2663                 goto err_wqp_sh_area;
2664         }
2665         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2666         return 0;
2667
2668 err_wqp_sh_area:
2669         pci_free_consistent(qdev->pdev,
2670                             PAGE_SIZE,
2671                             qdev->rx_ring_shadow_reg_area,
2672                             qdev->rx_ring_shadow_reg_dma);
2673         return -ENOMEM;
2674 }
2675
2676 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2677 {
2678         struct tx_ring_desc *tx_ring_desc;
2679         int i;
2680         struct ob_mac_iocb_req *mac_iocb_ptr;
2681
2682         mac_iocb_ptr = tx_ring->wq_base;
2683         tx_ring_desc = tx_ring->q;
2684         for (i = 0; i < tx_ring->wq_len; i++) {
2685                 tx_ring_desc->index = i;
2686                 tx_ring_desc->skb = NULL;
2687                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2688                 mac_iocb_ptr++;
2689                 tx_ring_desc++;
2690         }
2691         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2692 }
2693
2694 static void ql_free_tx_resources(struct ql_adapter *qdev,
2695                                  struct tx_ring *tx_ring)
2696 {
2697         if (tx_ring->wq_base) {
2698                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2699                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2700                 tx_ring->wq_base = NULL;
2701         }
2702         kfree(tx_ring->q);
2703         tx_ring->q = NULL;
2704 }
2705
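/* Allocate the work queue (the array of outbound IOCBs) in coherent
 * DMA memory, bailing out if the returned DMA address has any of the
 * WQ_ADDR_ALIGN bits set (which appears to be used as an alignment
 * mask here), then allocate the host-side tx_ring_desc array.
 */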
2706 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2707                                  struct tx_ring *tx_ring)
2708 {
2709         tx_ring->wq_base =
2710             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2711                                  &tx_ring->wq_base_dma);
2712
2713         if ((tx_ring->wq_base == NULL) ||
2714             tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2715                 goto pci_alloc_err;
2716
2717         tx_ring->q =
2718             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2719         if (tx_ring->q == NULL)
2720                 goto err;
2721
2722         return 0;
2723 err:
2724         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2725                             tx_ring->wq_base, tx_ring->wq_base_dma);
2726         tx_ring->wq_base = NULL;
2727 pci_alloc_err:
2728         netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2729         return -ENOMEM;
2730 }
2731
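/* Return any pages still held by the large buffer queue.  The page
 * chunks appear to be carved out of larger DMA-mapped blocks: only the
 * descriptor flagged with last_flag owns the mapping for the whole
 * block and is unmapped, while every descriptor drops its own page
 * reference.
 */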
2732 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2733 {
2734         struct bq_desc *lbq_desc;
2735
2736         uint32_t  curr_idx, clean_idx;
2737
2738         curr_idx = rx_ring->lbq_curr_idx;
2739         clean_idx = rx_ring->lbq_clean_idx;
2740         while (curr_idx != clean_idx) {
2741                 lbq_desc = &rx_ring->lbq[curr_idx];
2742
2743                 if (lbq_desc->p.pg_chunk.last_flag) {
2744                         pci_unmap_page(qdev->pdev,
2745                                 lbq_desc->p.pg_chunk.map,
2746                                 ql_lbq_block_size(qdev),
2747                                        PCI_DMA_FROMDEVICE);
2748                         lbq_desc->p.pg_chunk.last_flag = 0;
2749                 }
2750
2751                 put_page(lbq_desc->p.pg_chunk.page);
2752                 lbq_desc->p.pg_chunk.page = NULL;
2753
2754                 if (++curr_idx == rx_ring->lbq_len)
2755                         curr_idx = 0;
2756
2757         }
2758 }
2759
2760 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2761 {
2762         int i;
2763         struct bq_desc *sbq_desc;
2764
2765         for (i = 0; i < rx_ring->sbq_len; i++) {
2766                 sbq_desc = &rx_ring->sbq[i];
2767                 if (sbq_desc == NULL) {
2768                         netif_err(qdev, ifup, qdev->ndev,
2769                                   "sbq_desc %d is NULL.\n", i);
2770                         return;
2771                 }
2772                 if (sbq_desc->p.skb) {
2773                         pci_unmap_single(qdev->pdev,
2774                                          dma_unmap_addr(sbq_desc, mapaddr),
2775                                          dma_unmap_len(sbq_desc, maplen),
2776                                          PCI_DMA_FROMDEVICE);
2777                         dev_kfree_skb(sbq_desc->p.skb);
2778                         sbq_desc->p.skb = NULL;
2779                 }
2780         }
2781 }
2782
2783 /* Free all large and small rx buffers associated
2784  * with the completion queues for this device.
2785  */
2786 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2787 {
2788         int i;
2789         struct rx_ring *rx_ring;
2790
2791         for (i = 0; i < qdev->rx_ring_count; i++) {
2792                 rx_ring = &qdev->rx_ring[i];
2793                 if (rx_ring->lbq)
2794                         ql_free_lbq_buffers(qdev, rx_ring);
2795                 if (rx_ring->sbq)
2796                         ql_free_sbq_buffers(qdev, rx_ring);
2797         }
2798 }
2799
2800 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2801 {
2802         struct rx_ring *rx_ring;
2803         int i;
2804
2805         for (i = 0; i < qdev->rx_ring_count; i++) {
2806                 rx_ring = &qdev->rx_ring[i];
2807                 if (rx_ring->type != TX_Q)
2808                         ql_update_buffer_queues(qdev, rx_ring);
2809         }
2810 }
2811
2812 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2813                                 struct rx_ring *rx_ring)
2814 {
2815         int i;
2816         struct bq_desc *lbq_desc;
2817         __le64 *bq = rx_ring->lbq_base;
2818
2819         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2820         for (i = 0; i < rx_ring->lbq_len; i++) {
2821                 lbq_desc = &rx_ring->lbq[i];
2822                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2823                 lbq_desc->index = i;
2824                 lbq_desc->addr = bq;
2825                 bq++;
2826         }
2827 }
2828
2829 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2830                                 struct rx_ring *rx_ring)
2831 {
2832         int i;
2833         struct bq_desc *sbq_desc;
2834         __le64 *bq = rx_ring->sbq_base;
2835
2836         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2837         for (i = 0; i < rx_ring->sbq_len; i++) {
2838                 sbq_desc = &rx_ring->sbq[i];
2839                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2840                 sbq_desc->index = i;
2841                 sbq_desc->addr = bq;
2842                 bq++;
2843         }
2844 }
2845
2846 static void ql_free_rx_resources(struct ql_adapter *qdev,
2847                                  struct rx_ring *rx_ring)
2848 {
2849         /* Free the small buffer queue. */
2850         if (rx_ring->sbq_base) {
2851                 pci_free_consistent(qdev->pdev,
2852                                     rx_ring->sbq_size,
2853                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2854                 rx_ring->sbq_base = NULL;
2855         }
2856
2857         /* Free the small buffer queue control blocks. */
2858         kfree(rx_ring->sbq);
2859         rx_ring->sbq = NULL;
2860
2861         /* Free the large buffer queue. */
2862         if (rx_ring->lbq_base) {
2863                 pci_free_consistent(qdev->pdev,
2864                                     rx_ring->lbq_size,
2865                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2866                 rx_ring->lbq_base = NULL;
2867         }
2868
2869         /* Free the large buffer queue control blocks. */
2870         kfree(rx_ring->lbq);
2871         rx_ring->lbq = NULL;
2872
2873         /* Free the rx queue. */
2874         if (rx_ring->cq_base) {
2875                 pci_free_consistent(qdev->pdev,
2876                                     rx_ring->cq_size,
2877                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2878                 rx_ring->cq_base = NULL;
2879         }
2880 }
2881
2882 /* Allocate queues and buffers for this completion queue based
2883  * on the values in the parameter structure. */
2884 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2885                                  struct rx_ring *rx_ring)
2886 {
2887
2888         /*
2889          * Allocate the completion queue for this rx_ring.
2890          */
2891         rx_ring->cq_base =
2892             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2893                                  &rx_ring->cq_base_dma);
2894
2895         if (rx_ring->cq_base == NULL) {
2896                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2897                 return -ENOMEM;
2898         }
2899
2900         if (rx_ring->sbq_len) {
2901                 /*
2902                  * Allocate small buffer queue.
2903                  */
2904                 rx_ring->sbq_base =
2905                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2906                                          &rx_ring->sbq_base_dma);
2907
2908                 if (rx_ring->sbq_base == NULL) {
2909                         netif_err(qdev, ifup, qdev->ndev,
2910                                   "Small buffer queue allocation failed.\n");
2911                         goto err_mem;
2912                 }
2913
2914                 /*
2915                  * Allocate small buffer queue control blocks.
2916                  */
2917                 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2918                                              sizeof(struct bq_desc),
2919                                              GFP_KERNEL);
2920                 if (rx_ring->sbq == NULL)
2921                         goto err_mem;
2922
2923                 ql_init_sbq_ring(qdev, rx_ring);
2924         }
2925
2926         if (rx_ring->lbq_len) {
2927                 /*
2928                  * Allocate large buffer queue.
2929                  */
2930                 rx_ring->lbq_base =
2931                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2932                                          &rx_ring->lbq_base_dma);
2933
2934                 if (rx_ring->lbq_base == NULL) {
2935                         netif_err(qdev, ifup, qdev->ndev,
2936                                   "Large buffer queue allocation failed.\n");
2937                         goto err_mem;
2938                 }
2939                 /*
2940                  * Allocate large buffer queue control blocks.
2941                  */
2942                 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2943                                              sizeof(struct bq_desc),
2944                                              GFP_KERNEL);
2945                 if (rx_ring->lbq == NULL)
2946                         goto err_mem;
2947
2948                 ql_init_lbq_ring(qdev, rx_ring);
2949         }
2950
2951         return 0;
2952
2953 err_mem:
2954         ql_free_rx_resources(qdev, rx_ring);
2955         return -ENOMEM;
2956 }
2957
2958 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2959 {
2960         struct tx_ring *tx_ring;
2961         struct tx_ring_desc *tx_ring_desc;
2962         int i, j;
2963
2964         /*
2965          * Loop through all queues and free
2966          * any resources.
2967          */
2968         for (j = 0; j < qdev->tx_ring_count; j++) {
2969                 tx_ring = &qdev->tx_ring[j];
2970                 for (i = 0; i < tx_ring->wq_len; i++) {
2971                         tx_ring_desc = &tx_ring->q[i];
2972                         if (tx_ring_desc && tx_ring_desc->skb) {
2973                                 netif_err(qdev, ifdown, qdev->ndev,
2974                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2975                                           tx_ring_desc->skb, j,
2976                                           tx_ring_desc->index);
2977                                 ql_unmap_send(qdev, tx_ring_desc,
2978                                               tx_ring_desc->map_cnt);
2979                                 dev_kfree_skb(tx_ring_desc->skb);
2980                                 tx_ring_desc->skb = NULL;
2981                         }
2982                 }
2983         }
2984 }
2985
2986 static void ql_free_mem_resources(struct ql_adapter *qdev)
2987 {
2988         int i;
2989
2990         for (i = 0; i < qdev->tx_ring_count; i++)
2991                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2992         for (i = 0; i < qdev->rx_ring_count; i++)
2993                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2994         ql_free_shadow_space(qdev);
2995 }
2996
2997 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2998 {
2999         int i;
3000
3001         /* Allocate space for our shadow registers and such. */
3002         if (ql_alloc_shadow_space(qdev))
3003                 return -ENOMEM;
3004
3005         for (i = 0; i < qdev->rx_ring_count; i++) {
3006                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3007                         netif_err(qdev, ifup, qdev->ndev,
3008                                   "RX resource allocation failed.\n");
3009                         goto err_mem;
3010                 }
3011         }
3012         /* Allocate tx queue resources */
3013         for (i = 0; i < qdev->tx_ring_count; i++) {
3014                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3015                         netif_err(qdev, ifup, qdev->ndev,
3016                                   "TX resource allocation failed.\n");
3017                         goto err_mem;
3018                 }
3019         }
3020         return 0;
3021
3022 err_mem:
3023         ql_free_mem_resources(qdev);
3024         return -ENOMEM;
3025 }
3026
3027 /* Set up the rx ring control block and pass it to the chip.
3028  * The control block is defined as
3029  * "Completion Queue Initialization Control Block", or cqicb.
3030  */
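/* The first u64 of this ring's slice of the shadow page holds the
 * completion-queue producer-index shadow; the rest of the slice holds
 * the indirect lists of DB_PAGE_SIZE pages making up the large and
 * small buffer queues, which the cqicb references via lbq_addr and
 * sbq_addr.
 */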
3031 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3032 {
3033         struct cqicb *cqicb = &rx_ring->cqicb;
3034         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3035                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3036         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3037                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3038         void __iomem *doorbell_area =
3039             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3040         int err = 0;
3041         u16 bq_len;
3042         u64 tmp;
3043         __le64 *base_indirect_ptr;
3044         int page_entries;
3045
3046         /* Set up the shadow registers for this ring. */
3047         rx_ring->prod_idx_sh_reg = shadow_reg;
3048         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3049         *rx_ring->prod_idx_sh_reg = 0;
3050         shadow_reg += sizeof(u64);
3051         shadow_reg_dma += sizeof(u64);
3052         rx_ring->lbq_base_indirect = shadow_reg;
3053         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3054         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3055         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3056         rx_ring->sbq_base_indirect = shadow_reg;
3057         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3058
3059         /* PCI doorbell mem area + 0x00 for consumer index register */
3060         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3061         rx_ring->cnsmr_idx = 0;
3062         rx_ring->curr_entry = rx_ring->cq_base;
3063
3064         /* PCI doorbell mem area + 0x04 for valid register */
3065         rx_ring->valid_db_reg = doorbell_area + 0x04;
3066
3067         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3068         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3069
3070         /* PCI doorbell mem area + 0x1c */
3071         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3072
3073         memset((void *)cqicb, 0, sizeof(struct cqicb));
3074         cqicb->msix_vect = rx_ring->irq;
3075
3076         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3077         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3078
3079         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3080
3081         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3082
3083         /*
3084          * Set up the control block load flags.
3085          */
3086         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3087             FLAGS_LV |          /* Load MSI-X vector */
3088             FLAGS_LI;           /* Load irq delay values */
3089         if (rx_ring->lbq_len) {
3090                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3091                 tmp = (u64)rx_ring->lbq_base_dma;
3092                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3093                 page_entries = 0;
3094                 do {
3095                         *base_indirect_ptr = cpu_to_le64(tmp);
3096                         tmp += DB_PAGE_SIZE;
3097                         base_indirect_ptr++;
3098                         page_entries++;
3099                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3100                 cqicb->lbq_addr =
3101                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3102                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3103                         (u16) rx_ring->lbq_buf_size;
3104                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3105                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3106                         (u16) rx_ring->lbq_len;
3107                 cqicb->lbq_len = cpu_to_le16(bq_len);
3108                 rx_ring->lbq_prod_idx = 0;
3109                 rx_ring->lbq_curr_idx = 0;
3110                 rx_ring->lbq_clean_idx = 0;
3111                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3112         }
3113         if (rx_ring->sbq_len) {
3114                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3115                 tmp = (u64)rx_ring->sbq_base_dma;
3116                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3117                 page_entries = 0;
3118                 do {
3119                         *base_indirect_ptr = cpu_to_le64(tmp);
3120                         tmp += DB_PAGE_SIZE;
3121                         base_indirect_ptr++;
3122                         page_entries++;
3123                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3124                 cqicb->sbq_addr =
3125                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3126                 cqicb->sbq_buf_size =
3127                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3128                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3129                         (u16) rx_ring->sbq_len;
3130                 cqicb->sbq_len = cpu_to_le16(bq_len);
3131                 rx_ring->sbq_prod_idx = 0;
3132                 rx_ring->sbq_curr_idx = 0;
3133                 rx_ring->sbq_clean_idx = 0;
3134                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3135         }
3136         switch (rx_ring->type) {
3137         case TX_Q:
3138                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3139                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3140                 break;
3141         case RX_Q:
3142                 /* Inbound completion handling rx_rings run in
3143                  * separate NAPI contexts.
3144                  */
3145                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3146                                64);
3147                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3148                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3149                 break;
3150         default:
3151                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3152                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3153         }
3154         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3155                            CFG_LCQ, rx_ring->cq_id);
3156         if (err) {
3157                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3158                 return err;
3159         }
3160         return err;
3161 }
3162
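/* Set up the tx ring control block (wqicb): point it at the work
 * queue and the consumer-index shadow, assign the doorbell registers,
 * then download it to the chip with ql_write_cfg().
 */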
3163 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3164 {
3165         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3166         void __iomem *doorbell_area =
3167             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3168         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3169             (tx_ring->wq_id * sizeof(u64));
3170         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3171             (tx_ring->wq_id * sizeof(u64));
3172         int err = 0;
3173
3174         /*
3175          * Assign doorbell registers for this tx_ring.
3176          */
3177         /* TX PCI doorbell mem area for tx producer index */
3178         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3179         tx_ring->prod_idx = 0;
3180         /* TX PCI doorbell mem area + 0x04 */
3181         tx_ring->valid_db_reg = doorbell_area + 0x04;
3182
3183         /*
3184          * Assign shadow registers for this tx_ring.
3185          */
3186         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3187         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3188
3189         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3190         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3191                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3192         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3193         wqicb->rid = 0;
3194         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3195
3196         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3197
3198         ql_init_tx_ring(qdev, tx_ring);
3199
3200         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3201                            (u16) tx_ring->wq_id);
3202         if (err) {
3203                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3204                 return err;
3205         }
3206         return err;
3207 }
3208
3209 static void ql_disable_msix(struct ql_adapter *qdev)
3210 {
3211         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3212                 pci_disable_msix(qdev->pdev);
3213                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3214                 kfree(qdev->msi_x_entry);
3215                 qdev->msi_x_entry = NULL;
3216         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3217                 pci_disable_msi(qdev->pdev);
3218                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3219         }
3220 }
3221
3222 /* We start by trying to get the number of vectors
3223  * stored in qdev->intr_count. If we don't get that
3224  * many then we reduce the count and try again.
3225  */
3226 static void ql_enable_msix(struct ql_adapter *qdev)
3227 {
3228         int i, err;
3229
3230         /* Get the MSIX vectors. */
3231         if (qlge_irq_type == MSIX_IRQ) {
3232                 /* Try to alloc space for the msix struct,
3233                  * if it fails then go to MSI/legacy.
3234                  */
3235                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3236                                             sizeof(struct msix_entry),
3237                                             GFP_KERNEL);
3238                 if (!qdev->msi_x_entry) {
3239                         qlge_irq_type = MSI_IRQ;
3240                         goto msi;
3241                 }
3242
3243                 for (i = 0; i < qdev->intr_count; i++)
3244                         qdev->msi_x_entry[i].entry = i;
3245
3246                 /* Loop to get our vectors.  We start with
3247                  * what we want and settle for what we get.
3248                  */
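                /* With this kernel's pci_enable_msix(), a positive
                 * return value is the number of vectors that could
                 * have been allocated, so we retry with that smaller
                 * count; zero means success and a negative value is a
                 * hard failure.
                 */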
3249                 do {
3250                         err = pci_enable_msix(qdev->pdev,
3251                                 qdev->msi_x_entry, qdev->intr_count);
3252                         if (err > 0)
3253                                 qdev->intr_count = err;
3254                 } while (err > 0);
3255
3256                 if (err < 0) {
3257                         kfree(qdev->msi_x_entry);
3258                         qdev->msi_x_entry = NULL;
3259                         netif_warn(qdev, ifup, qdev->ndev,
3260                                    "MSI-X Enable failed, trying MSI.\n");
3261                         qdev->intr_count = 1;
3262                         qlge_irq_type = MSI_IRQ;
3263                 } else if (err == 0) {
3264                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3265                         netif_info(qdev, ifup, qdev->ndev,
3266                                    "MSI-X Enabled, got %d vectors.\n",
3267                                    qdev->intr_count);
3268                         return;
3269                 }
3270         }
3271 msi:
3272         qdev->intr_count = 1;
3273         if (qlge_irq_type == MSI_IRQ) {
3274                 if (!pci_enable_msi(qdev->pdev)) {
3275                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3276                         netif_info(qdev, ifup, qdev->ndev,
3277                                    "Running with MSI interrupts.\n");
3278                         return;
3279                 }
3280         }
3281         qlge_irq_type = LEG_IRQ;
3282         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3283                      "Running with legacy interrupts.\n");
3284 }
3285
3286 /* Each vector services 1 RSS ring and 1 or more
3287  * TX completion rings.  This function loops through
3288  * the TX completion rings and assigns the vector that
3289  * will service each.  An example would be if there are
3290  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3291  * This would mean that vector 0 would service RSS ring 0
3292  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3293  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3294  */
3295 static void ql_set_tx_vect(struct ql_adapter *qdev)
3296 {
3297         int i, j, vect;
3298         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3299
3300         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3301                 /* Assign irq vectors to TX rx_rings. */
3302                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3303                                          i < qdev->rx_ring_count; i++) {
3304                         if (j == tx_rings_per_vector) {
3305                                 vect++;
3306                                 j = 0;
3307                         }
3308                         qdev->rx_ring[i].irq = vect;
3309                         j++;
3310                 }
3311         } else {
3312                 /* For a single vector, all rings have an irq
3313                  * of zero.
3314                  */
3315                 for (i = 0; i < qdev->rx_ring_count; i++)
3316                         qdev->rx_ring[i].irq = 0;
3317         }
3318 }
3319
3320 /* Set the interrupt mask for this vector.  Each vector
3321  * will service 1 RSS ring and 1 or more TX completion
3322  * rings.  This function sets up a bit mask per vector
3323  * that indicates which rings it services.
3324  */
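/* Illustrative example: with 2 MSI-X vectors, 2 RSS rings and 8
 * outbound completion rings, vector 1 carries the bit for RSS ring 1
 * plus the bits for TX completion rings 4-7, matching the assignment
 * made in ql_set_tx_vect().
 */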
3325 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3326 {
3327         int j, vect = ctx->intr;
3328         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3329
3330         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3331                 /* Add the RSS ring serviced by this vector
3332                  * to the mask.
3333                  */
3334                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3335                 /* Add the TX ring(s) serviced by this vector
3336                  * to the mask. */
3337                 for (j = 0; j < tx_rings_per_vector; j++) {
3338                         ctx->irq_mask |=
3339                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3340                         (vect * tx_rings_per_vector) + j].cq_id);
3341                 }
3342         } else {
3343                 /* For a single vector we just set the bit
3344                  * for each queue's cq_id in the mask.
3345                  */
3346                 for (j = 0; j < qdev->rx_ring_count; j++)
3347                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3348         }
3349 }
3350
3351 /*
3352  * Here we build the intr_context structures based on
3353  * our rx_ring count and intr vector count.
3354  * The intr_context structure is used to hook each vector
3355  * to possibly different handlers.
3356  */
3357 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3358 {
3359         int i = 0;
3360         struct intr_context *intr_context = &qdev->intr_context[0];
3361
3362         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3363                 /* Each rx_ring has its
3364                  * own intr_context since we have separate
3365                  * vectors for each queue.
3366                  */
3367                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3368                         qdev->rx_ring[i].irq = i;
3369                         intr_context->intr = i;
3370                         intr_context->qdev = qdev;
3371                         /* Set up this vector's bit-mask that indicates
3372                          * which queues it services.
3373                          */
3374                         ql_set_irq_mask(qdev, intr_context);
3375                         /*
3376                          * We set up each vector's enable/disable/read bits so
3377                          * there are no bit/mask calculations in the critical path.
3378                          */
3379                         intr_context->intr_en_mask =
3380                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3382                             | i;
3383                         intr_context->intr_dis_mask =
3384                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3385                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3386                             INTR_EN_IHD | i;
3387                         intr_context->intr_read_mask =
3388                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3389                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3390                             i;
3391                         if (i == 0) {
3392                                 /* The first vector/queue handles
3393                                  * broadcast/multicast, fatal errors,
3394                                  * and firmware events.  This in addition
3395                                  * to normal inbound NAPI processing.
3396                                  */
3397                                 intr_context->handler = qlge_isr;
3398                                 sprintf(intr_context->name, "%s-rx-%d",
3399                                         qdev->ndev->name, i);
3400                         } else {
3401                                 /*
3402                                  * Inbound queues handle unicast frames only.
3403                                  */
3404                                 intr_context->handler = qlge_msix_rx_isr;
3405                                 sprintf(intr_context->name, "%s-rx-%d",
3406                                         qdev->ndev->name, i);
3407                         }
3408                 }
3409         } else {
3410                 /*
3411                  * All rx_rings use the same intr_context since
3412                  * there is only one vector.
3413                  */
3414                 intr_context->intr = 0;
3415                 intr_context->qdev = qdev;
3416                 /*
3417                  * We set up each vector's enable/disable/read bits so
3418                  * there are no bit/mask calculations in the critical path.
3419                  */
3420                 intr_context->intr_en_mask =
3421                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3422                 intr_context->intr_dis_mask =
3423                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3424                     INTR_EN_TYPE_DISABLE;
3425                 intr_context->intr_read_mask =
3426                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3427                 /*
3428                  * Single interrupt means one handler for all rings.
3429                  */
3430                 intr_context->handler = qlge_isr;
3431                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3432                 /* Set up this vector's bit-mask that indicates
3433                  * which queues it services. In this case there is
3434                  * a single vector so it will service all RSS and
3435                  * TX completion rings.
3436                  */
3437                 ql_set_irq_mask(qdev, intr_context);
3438         }
3439         /* Tell the TX completion rings which MSI-X vector
3440          * they will be using.
3441          */
3442         ql_set_tx_vect(qdev);
3443 }
3444
3445 static void ql_free_irq(struct ql_adapter *qdev)
3446 {
3447         int i;
3448         struct intr_context *intr_context = &qdev->intr_context[0];
3449
3450         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3451                 if (intr_context->hooked) {
3452                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3453                                 free_irq(qdev->msi_x_entry[i].vector,
3454                                          &qdev->rx_ring[i]);
3455                         } else {
3456                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3457                         }
3458                 }
3459         }
3460         ql_disable_msix(qdev);
3461 }
3462
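/* Hook an interrupt handler for each vector.  With MSI-X enabled,
 * each intr_context gets its own vector with its matching rx_ring as
 * the dev_id; otherwise a single MSI or (shared) legacy IRQ is
 * requested against rx_ring[0] and services every ring.
 */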
3463 static int ql_request_irq(struct ql_adapter *qdev)
3464 {
3465         int i;
3466         int status = 0;
3467         struct pci_dev *pdev = qdev->pdev;
3468         struct intr_context *intr_context = &qdev->intr_context[0];
3469
3470         ql_resolve_queues_to_irqs(qdev);
3471
3472         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3473                 atomic_set(&intr_context->irq_cnt, 0);
3474                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3475                         status = request_irq(qdev->msi_x_entry[i].vector,
3476                                              intr_context->handler,
3477                                              0,
3478                                              intr_context->name,
3479                                              &qdev->rx_ring[i]);
3480                         if (status) {
3481                                 netif_err(qdev, ifup, qdev->ndev,
3482                                           "Failed request for MSIX interrupt %d.\n",
3483                                           i);
3484                                 goto err_irq;
3485                         }
3486                 } else {
3487                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3488                                      "trying msi or legacy interrupts.\n");
3489                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3490                                      "%s: irq = %d.\n", __func__, pdev->irq);
3491                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3492                                      "%s: context->name = %s.\n", __func__,
3493                                      intr_context->name);
3494                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3495                                      "%s: dev_id = 0x%p.\n", __func__,
3496                                      &qdev->rx_ring[0]);
3497                         status =
3498                             request_irq(pdev->irq, qlge_isr,
3499                                         test_bit(QL_MSI_ENABLED,
3500                                                  &qdev->
3501                                                  flags) ? 0 : IRQF_SHARED,
3502                                         intr_context->name, &qdev->rx_ring[0]);
3503                         if (status)
3504                                 goto err_irq;
3505
3506                         netif_err(qdev, ifup, qdev->ndev,
3507                                   "Hooked intr %d, queue type %s, with name %s.\n",
3508                                   i,
3509                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3510                                   "DEFAULT_Q" :
3511                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3512                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3513                                   intr_context->name);
3514                 }
3515                 intr_context->hooked = 1;
3516         }
3517         return status;
3518 err_irq:
3519         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3520         ql_free_irq(qdev);
3521         return status;
3522 }
3523
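/* Download the RSS control block (ricb): a 40-byte hash key and a
 * 1024-entry indirection table.  The "i & (rss_ring_count - 1)" fill
 * only spreads flows evenly across all RSS rings when rss_ring_count
 * is a power of two.
 */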
3524 static int ql_start_rss(struct ql_adapter *qdev)
3525 {
3526         static const u8 init_hash_seed[] = {
3527                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3528                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3529                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3530                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3531                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3532         };
3533         struct ricb *ricb = &qdev->ricb;
3534         int status = 0;
3535         int i;
3536         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3537
3538         memset((void *)ricb, 0, sizeof(*ricb));
3539
3540         ricb->base_cq = RSS_L4K;
3541         ricb->flags =
3542                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3543         ricb->mask = cpu_to_le16((u16)(0x3ff));
3544
3545         /*
3546          * Fill out the Indirection Table.
3547          */
3548         for (i = 0; i < 1024; i++)
3549                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3550
3551         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3552         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3553
3554         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3555         if (status) {
3556                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3557                 return status;
3558         }
3559         return status;
3560 }
3561
3562 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3563 {
3564         int i, status = 0;
3565
3566         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3567         if (status)
3568                 return status;
3569         /* Clear all the entries in the routing table. */
3570         for (i = 0; i < 16; i++) {
3571                 status = ql_set_routing_reg(qdev, i, 0, 0);
3572                 if (status) {
3573                         netif_err(qdev, ifup, qdev->ndev,
3574                                   "Failed to init routing register for CAM packets.\n");
3575                         break;
3576                 }
3577         }
3578         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3579         return status;
3580 }
3581
3582 /* Initialize the frame-to-queue routing. */
3583 static int ql_route_initialize(struct ql_adapter *qdev)
3584 {
3585         int status = 0;
3586
3587         /* Clear all the entries in the routing table. */
3588         status = ql_clear_routing_entries(qdev);
3589         if (status)
3590                 return status;
3591
3592         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3593         if (status)
3594                 return status;
3595
3596         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3597                                                 RT_IDX_IP_CSUM_ERR, 1);
3598         if (status) {
3599                 netif_err(qdev, ifup, qdev->ndev,
3600                         "Failed to init routing register "
3601                         "for IP CSUM error packets.\n");
3602                 goto exit;
3603         }
3604         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3605                                                 RT_IDX_TU_CSUM_ERR, 1);
3606         if (status) {
3607                 netif_err(qdev, ifup, qdev->ndev,
3608                         "Failed to init routing register "
3609                         "for TCP/UDP CSUM error packets.\n");
3610                 goto exit;
3611         }
3612         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3613         if (status) {
3614                 netif_err(qdev, ifup, qdev->ndev,
3615                           "Failed to init routing register for broadcast packets.\n");
3616                 goto exit;
3617         }
3618         /* If we have more than one inbound queue, then turn on RSS in the
3619          * routing block.
3620          */
3621         if (qdev->rss_ring_count > 1) {
3622                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3623                                         RT_IDX_RSS_MATCH, 1);
3624                 if (status) {
3625                         netif_err(qdev, ifup, qdev->ndev,
3626                                   "Failed to init routing register for MATCH RSS packets.\n");
3627                         goto exit;
3628                 }
3629         }
3630
3631         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3632                                     RT_IDX_CAM_HIT, 1);
3633         if (status)
3634                 netif_err(qdev, ifup, qdev->ndev,
3635                           "Failed to init routing register for CAM packets.\n");
3636 exit:
3637         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3638         return status;
3639 }
3640
3641 int ql_cam_route_initialize(struct ql_adapter *qdev)
3642 {
3643         int status, set;
3644
3645         /* Check if the link is up and use that to
3646          * determine whether we are setting or clearing
3647          * the MAC address in the CAM.
3648          */
3649         set = ql_read32(qdev, STS);
3650         set &= qdev->port_link_up;
3651         status = ql_set_mac_addr(qdev, set);
3652         if (status) {
3653                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3654                 return status;
3655         }
3656
3657         status = ql_route_initialize(qdev);
3658         if (status)
3659                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3660
3661         return status;
3662 }
3663
3664 static int ql_adapter_initialize(struct ql_adapter *qdev)
3665 {
3666         u32 value, mask;
3667         int i;
3668         int status = 0;
3669
3670         /*
3671          * Set up the System register to halt on errors.
3672          */
3673         value = SYS_EFE | SYS_FAE;
3674         mask = value << 16;
3675         ql_write32(qdev, SYS, mask | value);
3676
3677         /* Set the default queue, and VLAN behavior. */
3678         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3679         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3680         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3681
3682         /* Set the MPI interrupt to enabled. */
3683         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3684
3685         /* Enable the function, set pagesize, enable error checking. */
3686         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3687             FSC_EC | FSC_VM_PAGE_4K;
3688         value |= SPLT_SETTING;
3689
3690         /* Set/clear header splitting. */
3691         mask = FSC_VM_PAGESIZE_MASK |
3692             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3693         ql_write32(qdev, FSC, mask | value);
3694
3695         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3696
3697         /* Set RX packet routing to use the port/PCI function on which the
3698          * packet arrived, in addition to the usual frame routing.
3699          * This is helpful on bonding where both interfaces can have
3700          * the same MAC address.
3701          */
3702         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3703         /* Reroute all packets to our Interface.
3704          * They may have been routed to MPI firmware
3705          * due to WOL.
3706          */
3707         value = ql_read32(qdev, MGMT_RCV_CFG);
3708         value &= ~MGMT_RCV_CFG_RM;
3709         mask = 0xffff0000;
3710
3711         /* Sticky reg needs clearing due to WOL. */
3712         ql_write32(qdev, MGMT_RCV_CFG, mask);
3713         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3714
3715         /* Default WOL is enabled on Mezz cards */
3716         if (qdev->pdev->subsystem_device == 0x0068 ||
3717                         qdev->pdev->subsystem_device == 0x0180)
3718                 qdev->wol = WAKE_MAGIC;
3719
3720         /* Start up the rx queues. */
3721         for (i = 0; i < qdev->rx_ring_count; i++) {
3722                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3723                 if (status) {
3724                         netif_err(qdev, ifup, qdev->ndev,
3725                                   "Failed to start rx ring[%d].\n", i);
3726                         return status;
3727                 }
3728         }
3729
3730         /* If there is more than one inbound completion queue
3731          * then download a RICB to configure RSS.
3732          */
3733         if (qdev->rss_ring_count > 1) {
3734                 status = ql_start_rss(qdev);
3735                 if (status) {
3736                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3737                         return status;
3738                 }
3739         }
3740
3741         /* Start up the tx queues. */
3742         for (i = 0; i < qdev->tx_ring_count; i++) {
3743                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3744                 if (status) {
3745                         netif_err(qdev, ifup, qdev->ndev,
3746                                   "Failed to start tx ring[%d].\n", i);
3747                         return status;
3748                 }
3749         }
3750
3751         /* Initialize the port and set the max framesize. */
3752         status = qdev->nic_ops->port_initialize(qdev);
3753         if (status)
3754                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3755
3756         /* Set up the MAC address and frame routing filter. */
3757         status = ql_cam_route_initialize(qdev);
3758         if (status) {
3759                 netif_err(qdev, ifup, qdev->ndev,
3760                           "Failed to init CAM/Routing tables.\n");
3761                 return status;
3762         }
3763
3764         /* Start NAPI for the RSS queues. */
3765         for (i = 0; i < qdev->rss_ring_count; i++)
3766                 napi_enable(&qdev->rx_ring[i].napi);
3767
3768         return status;
3769 }
3770
3771 /* Issue soft reset to chip. */
3772 static int ql_adapter_reset(struct ql_adapter *qdev)
3773 {
3774         u32 value;
3775         int status = 0;
3776         unsigned long end_jiffies;
3777
3778         /* Clear all the entries in the routing table. */
3779         status = ql_clear_routing_entries(qdev);
3780         if (status) {
3781                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3782                 return status;
3783         }
3784
3785         end_jiffies = jiffies +
3786                 max((unsigned long)1, usecs_to_jiffies(30));
3787
3788         /* If the recovery bit is set, skip the mailbox commands and
3789          * clear the bit; otherwise we are in the normal reset process.
3790          */
3791         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3792                 /* Stop management traffic. */
3793                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3794
3795                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3796                 ql_wait_fifo_empty(qdev);
3797         } else
3798                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3799
3800         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3801
3802         do {
3803                 value = ql_read32(qdev, RST_FO);
3804                 if ((value & RST_FO_FR) == 0)
3805                         break;
3806                 cpu_relax();
3807         } while (time_before(jiffies, end_jiffies));
3808
3809         if (value & RST_FO_FR) {
3810                 netif_err(qdev, ifdown, qdev->ndev,
3811                           "ETIMEDOUT!!! Timed out waiting for the chip reset to complete!\n");
3812                 status = -ETIMEDOUT;
3813         }
3814
3815         /* Resume management traffic. */
3816         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3817         return status;
3818 }
3819
3820 static void ql_display_dev_info(struct net_device *ndev)
3821 {
3822         struct ql_adapter *qdev = netdev_priv(ndev);
3823
3824         netif_info(qdev, probe, qdev->ndev,
3825                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3826                    "XG Roll = %d, XG Rev = %d.\n",
3827                    qdev->func,
3828                    qdev->port,
3829                    qdev->chip_rev_id & 0x0000000f,
3830                    qdev->chip_rev_id >> 4 & 0x0000000f,
3831                    qdev->chip_rev_id >> 8 & 0x0000000f,
3832                    qdev->chip_rev_id >> 12 & 0x0000000f);
3833         netif_info(qdev, probe, qdev->ndev,
3834                    "MAC address %pM\n", ndev->dev_addr);
3835 }
3836
3837 static int ql_wol(struct ql_adapter *qdev)
3838 {
3839         int status = 0;
3840         u32 wol = MB_WOL_DISABLE;
3841
3842         /* The CAM is still intact after a reset, but if we
3843          * are doing WOL, then we may need to program the
3844          * routing regs. We would also need to issue the mailbox
3845          * commands to instruct the MPI what to do per the ethtool
3846          * settings.
3847          */
3848
3849         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3850                         WAKE_MCAST | WAKE_BCAST)) {
3851                 netif_err(qdev, ifdown, qdev->ndev,
3852                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3853                           qdev->wol);
3854                 return -EINVAL;
3855         }
3856
3857         if (qdev->wol & WAKE_MAGIC) {
3858                 status = ql_mb_wol_set_magic(qdev, 1);
3859                 if (status) {
3860                         netif_err(qdev, ifdown, qdev->ndev,
3861                                   "Failed to set magic packet on %s.\n",
3862                                   qdev->ndev->name);
3863                         return status;
3864                 } else
3865                         netif_info(qdev, drv, qdev->ndev,
3866                                    "Enabled magic packet successfully on %s.\n",
3867                                    qdev->ndev->name);
3868
3869                 wol |= MB_WOL_MAGIC_PKT;
3870         }
3871
3872         if (qdev->wol) {
3873                 wol |= MB_WOL_MODE_ON;
3874                 status = ql_mb_wol_mode(qdev, wol);
3875                 netif_err(qdev, drv, qdev->ndev,
3876                           "WOL %s (wol code 0x%x) on %s\n",
3877                           (status == 0) ? "Successfully set" : "Failed",
3878                           wol, qdev->ndev->name);
3879         }
3880
3881         return status;
3882 }
3883
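/* Cancel (and wait for) all of the driver's delayed work before teardown. */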
3884 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3885 {
3887         /* Don't kill the reset worker thread if we
3888          * are in the process of recovery.
3889          */
3890         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3891                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3892         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3893         cancel_delayed_work_sync(&qdev->mpi_work);
3894         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3895         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3896         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3897 }
3898
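/* Quiesce the interface: stop NAPI and interrupts, drain the TX rings,
 * reset the chip and free the RX buffers.
 */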
3899 static int ql_adapter_down(struct ql_adapter *qdev)
3900 {
3901         int i, status = 0;
3902
3903         ql_link_off(qdev);
3904
3905         ql_cancel_all_work_sync(qdev);
3906
3907         for (i = 0; i < qdev->rss_ring_count; i++)
3908                 napi_disable(&qdev->rx_ring[i].napi);
3909
3910         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3911
3912         ql_disable_interrupts(qdev);
3913
3914         ql_tx_ring_clean(qdev);
3915
3916         /* Call netif_napi_del() from common point.
3917          */
3918         for (i = 0; i < qdev->rss_ring_count; i++)
3919                 netif_napi_del(&qdev->rx_ring[i].napi);
3920
3921         status = ql_adapter_reset(qdev);
3922         if (status)
3923                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3924                           qdev->func);
3925         ql_free_rx_buffers(qdev);
3926
3927         return status;
3928 }
3929
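/* Bring the interface up: initialize the hardware, post RX buffers,
 * restore the RX mode and VLAN settings, then enable interrupts and
 * the TX queues.
 */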
3930 static int ql_adapter_up(struct ql_adapter *qdev)
3931 {
3932         int err = 0;
3933
3934         err = ql_adapter_initialize(qdev);
3935         if (err) {
3936                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3937                 goto err_init;
3938         }
3939         set_bit(QL_ADAPTER_UP, &qdev->flags);
3940         ql_alloc_rx_buffers(qdev);
3941         /* If the port is initialized and the
3942          * link is up then turn on the carrier.
3943          */
3944         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3945                         (ql_read32(qdev, STS) & qdev->port_link_up))
3946                 ql_link_on(qdev);
3947         /* Restore rx mode. */
3948         clear_bit(QL_ALLMULTI, &qdev->flags);
3949         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3950         qlge_set_multicast_list(qdev->ndev);
3951
3952         /* Restore vlan setting. */
3953         qlge_restore_vlan(qdev);
3954
3955         ql_enable_interrupts(qdev);
3956         ql_enable_all_completion_interrupts(qdev);
3957         netif_tx_start_all_queues(qdev->ndev);
3958
3959         return 0;
3960 err_init:
3961         ql_adapter_reset(qdev);
3962         return err;
3963 }
3964
3965 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3966 {
3967         ql_free_mem_resources(qdev);
3968         ql_free_irq(qdev);
3969 }
3970
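/* Allocate the queue/DMA memory and request the interrupt vectors. */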
3971 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3972 {
3973         int status = 0;
3974
3975         if (ql_alloc_mem_resources(qdev)) {
3976                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3977                 return -ENOMEM;
3978         }
3979         status = ql_request_irq(qdev);
3980         return status;
3981 }
3982
3983 static int qlge_close(struct net_device *ndev)
3984 {
3985         struct ql_adapter *qdev = netdev_priv(ndev);
3986
3987         /* If we hit pci_channel_io_perm_failure
3988          * failure condition, then we already
3989          * brought the adapter down.
3990          */
3991         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3992                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3993                 clear_bit(QL_EEH_FATAL, &qdev->flags);
3994                 return 0;
3995         }
3996
3997         /*
3998          * Wait for device to recover from a reset.
3999          * (Rarely happens, but possible.)
4000          */
4001         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4002                 msleep(1);
4003         ql_adapter_down(qdev);
4004         ql_release_adapter_resources(qdev);
4005         return 0;
4006 }
4007
4008 static int ql_configure_rings(struct ql_adapter *qdev)
4009 {
4010         int i;
4011         struct rx_ring *rx_ring;
4012         struct tx_ring *tx_ring;
4013         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4014         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4015                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4016
4017         qdev->lbq_buf_order = get_order(lbq_buf_len);
4018
4019         /* In a perfect world we have one RSS ring for each CPU
4020          * and each has its own vector.  To do that we ask for
4021          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4022          * vector count to what we actually get.  We then
4023          * allocate an RSS ring for each.
4024          * Essentially, we are doing min(cpu_count, msix_vector_count).
4025          */
4026         qdev->intr_count = cpu_cnt;
4027         ql_enable_msix(qdev);
4028         /* Adjust the RSS ring count to the actual vector count. */
4029         qdev->rss_ring_count = qdev->intr_count;
4030         qdev->tx_ring_count = cpu_cnt;
4031         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4032
4033         for (i = 0; i < qdev->tx_ring_count; i++) {
4034                 tx_ring = &qdev->tx_ring[i];
4035                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4036                 tx_ring->qdev = qdev;
4037                 tx_ring->wq_id = i;
4038                 tx_ring->wq_len = qdev->tx_ring_size;
4039                 tx_ring->wq_size =
4040                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4041
4042                 /*
4043                  * The completion queue IDs for the tx rings start
4044                  * immediately after the rss rings.
4045                  */
4046                 tx_ring->cq_id = qdev->rss_ring_count + i;
4047         }
4048
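        /* The first rss_ring_count rx_rings are inbound (RSS) completion
         * queues; the remaining ones only service outbound (TX) completions,
         * one per TX ring.
         */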
4049         for (i = 0; i < qdev->rx_ring_count; i++) {
4050                 rx_ring = &qdev->rx_ring[i];
4051                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4052                 rx_ring->qdev = qdev;
4053                 rx_ring->cq_id = i;
4054                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4055                 if (i < qdev->rss_ring_count) {
4056                         /*
4057                          * Inbound (RSS) queues.
4058                          */
4059                         rx_ring->cq_len = qdev->rx_ring_size;
4060                         rx_ring->cq_size =
4061                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4062                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4063                         rx_ring->lbq_size =
4064                             rx_ring->lbq_len * sizeof(__le64);
4065                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4066                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4067                         rx_ring->sbq_size =
4068                             rx_ring->sbq_len * sizeof(__le64);
4069                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4070                         rx_ring->type = RX_Q;
4071                 } else {
4072                         /*
4073                          * Outbound queue handles outbound completions only.
4074                          */
4075                         /* outbound cq is same size as tx_ring it services. */
4076                         rx_ring->cq_len = qdev->tx_ring_size;
4077                         rx_ring->cq_size =
4078                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4079                         rx_ring->lbq_len = 0;
4080                         rx_ring->lbq_size = 0;
4081                         rx_ring->lbq_buf_size = 0;
4082                         rx_ring->sbq_len = 0;
4083                         rx_ring->sbq_size = 0;
4084                         rx_ring->sbq_buf_size = 0;
4085                         rx_ring->type = TX_Q;
4086                 }
4087         }
4088         return 0;
4089 }
4090
4091 static int qlge_open(struct net_device *ndev)
4092 {
4093         int err = 0;
4094         struct ql_adapter *qdev = netdev_priv(ndev);
4095
4096         err = ql_adapter_reset(qdev);
4097         if (err)
4098                 return err;
4099
4100         err = ql_configure_rings(qdev);
4101         if (err)
4102                 return err;
4103
4104         err = ql_get_adapter_resources(qdev);
4105         if (err)
4106                 goto error_up;
4107
4108         err = ql_adapter_up(qdev);
4109         if (err)
4110                 goto error_up;
4111
4112         return err;
4113
4114 error_up:
4115         ql_release_adapter_resources(qdev);
4116         return err;
4117 }
4118
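/* Resize the large receive buffers after an MTU change.  This requires
 * a full down/up cycle of the adapter.
 */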
4119 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4120 {
4121         struct rx_ring *rx_ring;
4122         int i, status;
4123         u32 lbq_buf_len;
4124
4125         /* Wait for an outstanding reset to complete. */
4126         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4127                 int i = 4;
4128                 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4129                         netif_err(qdev, ifup, qdev->ndev,
4130                                   "Waiting for adapter UP...\n");
4131                         ssleep(1);
4132                 }
4133
4134                 if (!i) {
4135                         netif_err(qdev, ifup, qdev->ndev,
4136                                   "Timed out waiting for adapter UP\n");
4137                         return -ETIMEDOUT;
4138                 }
4139         }
4140
4141         status = ql_adapter_down(qdev);
4142         if (status)
4143                 goto error;
4144
4145         /* Get the new rx buffer size. */
4146         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4147                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4148         qdev->lbq_buf_order = get_order(lbq_buf_len);
4149
4150         for (i = 0; i < qdev->rss_ring_count; i++) {
4151                 rx_ring = &qdev->rx_ring[i];
4152                 /* Set the new size. */
4153                 rx_ring->lbq_buf_size = lbq_buf_len;
4154         }
4155
4156         status = ql_adapter_up(qdev);
4157         if (status)
4158                 goto error;
4159
4160         return status;
4161 error:
4162         netif_alert(qdev, ifup, qdev->ndev,
4163                     "Driver up/down cycle failed, closing device.\n");
4164         set_bit(QL_ADAPTER_UP, &qdev->flags);
4165         dev_close(qdev->ndev);
4166         return status;
4167 }
4168
4169 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4170 {
4171         struct ql_adapter *qdev = netdev_priv(ndev);
4172         int status;
4173
4174         if (ndev->mtu == 1500 && new_mtu == 9000) {
4175                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4176         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4177                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4178         } else
4179                 return -EINVAL;
4180
4181         queue_delayed_work(qdev->workqueue,
4182                         &qdev->mpi_port_cfg_work, 3*HZ);
4183
4184         ndev->mtu = new_mtu;
4185
4186         if (!netif_running(qdev->ndev))
4187                 return 0;
4189
4190         status = ql_change_rx_buffers(qdev);
4191         if (status) {
4192                 netif_err(qdev, ifup, qdev->ndev,
4193                           "Changing MTU failed.\n");
4194         }
4195
4196         return status;
4197 }
4198
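/* Fold the per-ring packet, byte and error counters into the netdev stats. */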
4199 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4201 {
4202         struct ql_adapter *qdev = netdev_priv(ndev);
4203         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4204         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4205         unsigned long pkts, mcast, dropped, errors, bytes;
4206         int i;
4207
4208         /* Get RX stats. */
4209         pkts = mcast = dropped = errors = bytes = 0;
4210         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4211                 pkts += rx_ring->rx_packets;
4212                 bytes += rx_ring->rx_bytes;
4213                 dropped += rx_ring->rx_dropped;
4214                 errors += rx_ring->rx_errors;
4215                 mcast += rx_ring->rx_multicast;
4216         }
4217         ndev->stats.rx_packets = pkts;
4218         ndev->stats.rx_bytes = bytes;
4219         ndev->stats.rx_dropped = dropped;
4220         ndev->stats.rx_errors = errors;
4221         ndev->stats.multicast = mcast;
4222
4223         /* Get TX stats. */
4224         pkts = errors = bytes = 0;
4225         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4226                 pkts += tx_ring->tx_packets;
4227                 bytes += tx_ring->tx_bytes;
4228                 errors += tx_ring->tx_errors;
4229         }
4230         ndev->stats.tx_packets = pkts;
4231         ndev->stats.tx_bytes = bytes;
4232         ndev->stats.tx_errors = errors;
4233         return &ndev->stats;
4234 }
4235
4236 static void qlge_set_multicast_list(struct net_device *ndev)
4237 {
4238         struct ql_adapter *qdev = netdev_priv(ndev);
4239         struct netdev_hw_addr *ha;
4240         int i, status;
4241
4242         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4243         if (status)
4244                 return;
4245         /*
4246          * Set or clear promiscuous mode if a
4247          * transition is taking place.
4248          */
4249         if (ndev->flags & IFF_PROMISC) {
4250                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4251                         if (ql_set_routing_reg
4252                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4253                                 netif_err(qdev, hw, qdev->ndev,
4254                                           "Failed to set promiscuous mode.\n");
4255                         } else {
4256                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4257                         }
4258                 }
4259         } else {
4260                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4261                         if (ql_set_routing_reg
4262                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4263                                 netif_err(qdev, hw, qdev->ndev,
4264                                           "Failed to clear promiscuous mode.\n");
4265                         } else {
4266                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4267                         }
4268                 }
4269         }
4270
4271         /*
4272          * Set or clear all multicast mode if a
4273          * transition is taking place.
4274          */
4275         if ((ndev->flags & IFF_ALLMULTI) ||
4276             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4277                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4278                         if (ql_set_routing_reg
4279                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4280                                 netif_err(qdev, hw, qdev->ndev,
4281                                           "Failed to set all-multi mode.\n");
4282                         } else {
4283                                 set_bit(QL_ALLMULTI, &qdev->flags);
4284                         }
4285                 }
4286         } else {
4287                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4288                         if (ql_set_routing_reg
4289                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4290                                 netif_err(qdev, hw, qdev->ndev,
4291                                           "Failed to clear all-multi mode.\n");
4292                         } else {
4293                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4294                         }
4295                 }
4296         }
4297
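        /* Program the multicast list into the MAC address CAM and turn on
         * multicast-match routing.
         */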
4298         if (!netdev_mc_empty(ndev)) {
4299                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4300                 if (status)
4301                         goto exit;
4302                 i = 0;
4303                 netdev_for_each_mc_addr(ha, ndev) {
4304                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4305                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4306                                 netif_err(qdev, hw, qdev->ndev,
4307                                           "Failed to load multicast address.\n");
4308                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4309                                 goto exit;
4310                         }
4311                         i++;
4312                 }
4313                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4314                 if (ql_set_routing_reg
4315                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4316                         netif_err(qdev, hw, qdev->ndev,
4317                                   "Failed to set multicast match mode.\n");
4318                 } else {
4319                         set_bit(QL_ALLMULTI, &qdev->flags);
4320                 }
4321         }
4322 exit:
4323         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4324 }
4325
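/* Set a new station address: update the netdev and driver copies, then
 * reprogram this function's CAM entry.
 */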
4326 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4327 {
4328         struct ql_adapter *qdev = netdev_priv(ndev);
4329         struct sockaddr *addr = p;
4330         int status;
4331
4332         if (!is_valid_ether_addr(addr->sa_data))
4333                 return -EADDRNOTAVAIL;
4334         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4335         /* Update local copy of current mac address. */
4336         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4337
4338         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4339         if (status)
4340                 return status;
4341         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4342                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4343         if (status)
4344                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4345         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4346         return status;
4347 }
4348
4349 static void qlge_tx_timeout(struct net_device *ndev)
4350 {
4351         struct ql_adapter *qdev = netdev_priv(ndev);
4352         ql_queue_asic_error(qdev);
4353 }
4354
4355 static void ql_asic_reset_work(struct work_struct *work)
4356 {
4357         struct ql_adapter *qdev =
4358             container_of(work, struct ql_adapter, asic_reset_work.work);
4359         int status;
4360         rtnl_lock();
4361         status = ql_adapter_down(qdev);
4362         if (status)
4363                 goto error;
4364
4365         status = ql_adapter_up(qdev);
4366         if (status)
4367                 goto error;
4368
4369         /* Restore rx mode. */
4370         clear_bit(QL_ALLMULTI, &qdev->flags);
4371         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4372         qlge_set_multicast_list(qdev->ndev);
4373
4374         rtnl_unlock();
4375         return;
4376 error:
4377         netif_alert(qdev, ifup, qdev->ndev,
4378                     "Driver up/down cycle failed, closing device\n");
4379
4380         set_bit(QL_ADAPTER_UP, &qdev->flags);
4381         dev_close(qdev->ndev);
4382         rtnl_unlock();
4383 }
4384
4385 static const struct nic_operations qla8012_nic_ops = {
4386         .get_flash              = ql_get_8012_flash_params,
4387         .port_initialize        = ql_8012_port_initialize,
4388 };
4389
4390 static const struct nic_operations qla8000_nic_ops = {
4391         .get_flash              = ql_get_8000_flash_params,
4392         .port_initialize        = ql_8000_port_initialize,
4393 };
4394
4395 /* Find the pcie function number for the other NIC
4396  * on this chip.  Since both NIC functions share a
4397  * common firmware we have the lowest enabled function
4398  * do any common work.  Examples would be resetting
4399  * after a fatal firmware error, or doing a firmware
4400  * coredump.
4401  */
4402 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4403 {
4404         int status = 0;
4405         u32 temp;
4406         u32 nic_func1, nic_func2;
4407
4408         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4409                         &temp);
4410         if (status)
4411                 return status;
4412
4413         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4414                         MPI_TEST_NIC_FUNC_MASK);
4415         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4416                         MPI_TEST_NIC_FUNC_MASK);
4417
4418         if (qdev->func == nic_func1)
4419                 qdev->alt_func = nic_func2;
4420         else if (qdev->func == nic_func2)
4421                 qdev->alt_func = nic_func1;
4422         else
4423                 status = -EIO;
4424
4425         return status;
4426 }
4427
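/* Read the function number from the STS register and derive the port,
 * semaphore masks, mailbox addresses and chip revision for this function.
 */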
4428 static int ql_get_board_info(struct ql_adapter *qdev)
4429 {
4430         int status;
4431         qdev->func =
4432             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4433         if (qdev->func > 3)
4434                 return -EIO;
4435
4436         status = ql_get_alt_pcie_func(qdev);
4437         if (status)
4438                 return status;
4439
4440         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4441         if (qdev->port) {
4442                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4443                 qdev->port_link_up = STS_PL1;
4444                 qdev->port_init = STS_PI1;
4445                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4446                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4447         } else {
4448                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4449                 qdev->port_link_up = STS_PL0;
4450                 qdev->port_init = STS_PI0;
4451                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4452                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4453         }
4454         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4455         qdev->device_id = qdev->pdev->device;
4456         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4457                 qdev->nic_ops = &qla8012_nic_ops;
4458         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4459                 qdev->nic_ops = &qla8000_nic_ops;
4460         return status;
4461 }
4462
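/* Release everything acquired in ql_init_device() except the PCI enable
 * itself: the workqueue, the mapped BARs, the coredump buffer and the
 * PCI regions.
 */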
4463 static void ql_release_all(struct pci_dev *pdev)
4464 {
4465         struct net_device *ndev = pci_get_drvdata(pdev);
4466         struct ql_adapter *qdev = netdev_priv(ndev);
4467
4468         if (qdev->workqueue) {
4469                 destroy_workqueue(qdev->workqueue);
4470                 qdev->workqueue = NULL;
4471         }
4472
4473         if (qdev->reg_base)
4474                 iounmap(qdev->reg_base);
4475         if (qdev->doorbell_area)
4476                 iounmap(qdev->doorbell_area);
4477         vfree(qdev->mpi_coredump);
4478         pci_release_regions(pdev);
4479         pci_set_drvdata(pdev, NULL);
4480 }
4481
4482 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4483                           int cards_found)
4484 {
4485         struct ql_adapter *qdev = netdev_priv(ndev);
4486         int err = 0;
4487
4488         memset((void *)qdev, 0, sizeof(*qdev));
4489         err = pci_enable_device(pdev);
4490         if (err) {
4491                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4492                 return err;
4493         }
4494
4495         qdev->ndev = ndev;
4496         qdev->pdev = pdev;
4497         pci_set_drvdata(pdev, ndev);
4498
4499         /* Set PCIe read request size */
4500         err = pcie_set_readrq(pdev, 4096);
4501         if (err) {
4502                 dev_err(&pdev->dev, "Set readrq failed.\n");
4503                 goto err_out1;
4504         }
4505
4506         err = pci_request_regions(pdev, DRV_NAME);
4507         if (err) {
4508                 dev_err(&pdev->dev, "PCI region request failed.\n");
4509                 goto err_out1;
4510         }
4511
4512         pci_set_master(pdev);
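        /* Prefer 64-bit DMA and fall back to a 32-bit mask if that fails. */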
4513         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4514                 set_bit(QL_DMA64, &qdev->flags);
4515                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4516         } else {
4517                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4518                 if (!err)
4519                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4520         }
4521
4522         if (err) {
4523                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4524                 goto err_out2;
4525         }
4526
4527         /* Set PCIe reset type for EEH to fundamental. */
4528         pdev->needs_freset = 1;
4529         pci_save_state(pdev);
4530         qdev->reg_base =
4531             ioremap_nocache(pci_resource_start(pdev, 1),
4532                             pci_resource_len(pdev, 1));
4533         if (!qdev->reg_base) {
4534                 dev_err(&pdev->dev, "Register mapping failed.\n");
4535                 err = -ENOMEM;
4536                 goto err_out2;
4537         }
4538
4539         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4540         qdev->doorbell_area =
4541             ioremap_nocache(pci_resource_start(pdev, 3),
4542                             pci_resource_len(pdev, 3));
4543         if (!qdev->doorbell_area) {
4544                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4545                 err = -ENOMEM;
4546                 goto err_out2;
4547         }
4548
4549         err = ql_get_board_info(qdev);
4550         if (err) {
4551                 dev_err(&pdev->dev, "Register access failed.\n");
4552                 err = -EIO;
4553                 goto err_out2;
4554         }
4555         qdev->msg_enable = netif_msg_init(debug, default_msg);
4556         spin_lock_init(&qdev->hw_lock);
4557         spin_lock_init(&qdev->stats_lock);
4558
4559         if (qlge_mpi_coredump) {
4560                 qdev->mpi_coredump =
4561                         vmalloc(sizeof(struct ql_mpi_coredump));
4562                 if (qdev->mpi_coredump == NULL) {
4563                         err = -ENOMEM;
4564                         goto err_out2;
4565                 }
4566                 if (qlge_force_coredump)
4567                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4568         }
4569         /* make sure the EEPROM is good */
4570         err = qdev->nic_ops->get_flash(qdev);
4571         if (err) {
4572                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4573                 goto err_out2;
4574         }
4575
4576         /* Keep local copy of current mac address. */
4577         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4578
4579         /* Set up the default ring sizes. */
4580         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4581         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4582
4583         /* Set up the coalescing parameters. */
4584         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4585         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4586         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4587         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4588
4589         /*
4590          * Set up the operating parameters.
4591          */
4592         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4593         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4594         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4595         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4596         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4597         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4598         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4599         init_completion(&qdev->ide_completion);
4600         mutex_init(&qdev->mpi_mutex);
4601
4602         if (!cards_found) {
4603                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4604                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4605                          DRV_NAME, DRV_VERSION);
4606         }
4607         return 0;
4608 err_out2:
4609         ql_release_all(pdev);
4610 err_out1:
4611         pci_disable_device(pdev);
4612         return err;
4613 }
4614
4615 static const struct net_device_ops qlge_netdev_ops = {
4616         .ndo_open               = qlge_open,
4617         .ndo_stop               = qlge_close,
4618         .ndo_start_xmit         = qlge_send,
4619         .ndo_change_mtu         = qlge_change_mtu,
4620         .ndo_get_stats          = qlge_get_stats,
4621         .ndo_set_rx_mode        = qlge_set_multicast_list,
4622         .ndo_set_mac_address    = qlge_set_mac_address,
4623         .ndo_validate_addr      = eth_validate_addr,
4624         .ndo_tx_timeout         = qlge_tx_timeout,
4625         .ndo_fix_features       = qlge_fix_features,
4626         .ndo_set_features       = qlge_set_features,
4627         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4628         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4629 };
4630
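/* Periodic register read used to notice a dead PCI bus (and trigger EEH);
 * reschedules itself every 5 seconds unless the channel is offline.
 */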
4631 static void ql_timer(unsigned long data)
4632 {
4633         struct ql_adapter *qdev = (struct ql_adapter *)data;
4634         u32 var = 0;
4635
4636         var = ql_read32(qdev, STS);
4637         if (pci_channel_offline(qdev->pdev)) {
4638                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4639                 return;
4640         }
4641
4642         mod_timer(&qdev->timer, jiffies + (5*HZ));
4643 }
4644
4645 static int qlge_probe(struct pci_dev *pdev,
4646                       const struct pci_device_id *pci_entry)
4647 {
4648         struct net_device *ndev = NULL;
4649         struct ql_adapter *qdev = NULL;
4650         static int cards_found;
4651         int err = 0;
4652
4653         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4654                         min(MAX_CPUS, netif_get_num_default_rss_queues()));
4655         if (!ndev)
4656                 return -ENOMEM;
4657
4658         err = ql_init_device(pdev, ndev, cards_found);
4659         if (err < 0) {
4660                 free_netdev(ndev);
4661                 return err;
4662         }
4663
4664         qdev = netdev_priv(ndev);
4665         SET_NETDEV_DEV(ndev, &pdev->dev);
4666         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4667                 NETIF_F_TSO | NETIF_F_TSO_ECN |
4668                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4669         ndev->features = ndev->hw_features |
4670                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4671         ndev->vlan_features = ndev->hw_features;
4672
4673         if (test_bit(QL_DMA64, &qdev->flags))
4674                 ndev->features |= NETIF_F_HIGHDMA;
4675
4676         /*
4677          * Set up net_device structure.
4678          */
4679         ndev->tx_queue_len = qdev->tx_ring_size;
4680         ndev->irq = pdev->irq;
4681
4682         ndev->netdev_ops = &qlge_netdev_ops;
4683         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4684         ndev->watchdog_timeo = 10 * HZ;
4685
4686         err = register_netdev(ndev);
4687         if (err) {
4688                 dev_err(&pdev->dev, "net device registration failed.\n");
4689                 ql_release_all(pdev);
4690                 pci_disable_device(pdev);
4691                 return err;
4692         }
4693         /* Start up the timer to trigger EEH if
4694          * the bus goes dead
4695          */
4696         init_timer_deferrable(&qdev->timer);
4697         qdev->timer.data = (unsigned long)qdev;
4698         qdev->timer.function = ql_timer;
4699         qdev->timer.expires = jiffies + (5*HZ);
4700         add_timer(&qdev->timer);
4701         ql_link_off(qdev);
4702         ql_display_dev_info(ndev);
4703         atomic_set(&qdev->lb_count, 0);
4704         cards_found++;
4705         return 0;
4706 }
4707
4708 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4709 {
4710         return qlge_send(skb, ndev);
4711 }
4712
4713 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4714 {
4715         return ql_clean_inbound_rx_ring(rx_ring, budget);
4716 }
4717
4718 static void qlge_remove(struct pci_dev *pdev)
4719 {
4720         struct net_device *ndev = pci_get_drvdata(pdev);
4721         struct ql_adapter *qdev = netdev_priv(ndev);
4722         del_timer_sync(&qdev->timer);
4723         ql_cancel_all_work_sync(qdev);
4724         unregister_netdev(ndev);
4725         ql_release_all(pdev);
4726         pci_disable_device(pdev);
4727         free_netdev(ndev);
4728 }
4729
4730 /* Clean up resources without touching hardware. */
4731 static void ql_eeh_close(struct net_device *ndev)
4732 {
4733         int i;
4734         struct ql_adapter *qdev = netdev_priv(ndev);
4735
4736         if (netif_carrier_ok(ndev)) {
4737                 netif_carrier_off(ndev);
4738                 netif_stop_queue(ndev);
4739         }
4740
4741         /* Disabling the timer */
4742         del_timer_sync(&qdev->timer);
4743         ql_cancel_all_work_sync(qdev);
4744
4745         for (i = 0; i < qdev->rss_ring_count; i++)
4746                 netif_napi_del(&qdev->rx_ring[i].napi);
4747
4748         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4749         ql_tx_ring_clean(qdev);
4750         ql_free_rx_buffers(qdev);
4751         ql_release_adapter_resources(qdev);
4752 }
4753
4754 /*
4755  * This callback is called by the PCI subsystem whenever
4756  * a PCI bus error is detected.
4757  */
4758 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4759                                                enum pci_channel_state state)
4760 {
4761         struct net_device *ndev = pci_get_drvdata(pdev);
4762         struct ql_adapter *qdev = netdev_priv(ndev);
4763
4764         switch (state) {
4765         case pci_channel_io_normal:
4766                 return PCI_ERS_RESULT_CAN_RECOVER;
4767         case pci_channel_io_frozen:
4768                 netif_device_detach(ndev);
4769                 if (netif_running(ndev))
4770                         ql_eeh_close(ndev);
4771                 pci_disable_device(pdev);
4772                 return PCI_ERS_RESULT_NEED_RESET;
4773         case pci_channel_io_perm_failure:
4774                 dev_err(&pdev->dev,
4775                         "%s: pci_channel_io_perm_failure.\n", __func__);
4776                 ql_eeh_close(ndev);
4777                 set_bit(QL_EEH_FATAL, &qdev->flags);
4778                 return PCI_ERS_RESULT_DISCONNECT;
4779         }
4780
4781         /* Request a slot reset. */
4782         return PCI_ERS_RESULT_NEED_RESET;
4783 }
4784
4785 /*
4786  * This callback is called after the PCI bus has been reset.
4787  * Basically, this tries to restart the card from scratch.
4788  * This is a shortened version of the device probe/discovery code;
4789  * it resembles the first half of the probe routine.
4790  */
4791 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4792 {
4793         struct net_device *ndev = pci_get_drvdata(pdev);
4794         struct ql_adapter *qdev = netdev_priv(ndev);
4795
4796         pdev->error_state = pci_channel_io_normal;
4797
4798         pci_restore_state(pdev);
4799         if (pci_enable_device(pdev)) {
4800                 netif_err(qdev, ifup, qdev->ndev,
4801                           "Cannot re-enable PCI device after reset.\n");
4802                 return PCI_ERS_RESULT_DISCONNECT;
4803         }
4804         pci_set_master(pdev);
4805
4806         if (ql_adapter_reset(qdev)) {
4807                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4808                 set_bit(QL_EEH_FATAL, &qdev->flags);
4809                 return PCI_ERS_RESULT_DISCONNECT;
4810         }
4811
4812         return PCI_ERS_RESULT_RECOVERED;
4813 }
4814
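/* Called when error recovery is complete; reopen the interface if it was
 * running and restart the EEH watchdog timer.
 */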
4815 static void qlge_io_resume(struct pci_dev *pdev)
4816 {
4817         struct net_device *ndev = pci_get_drvdata(pdev);
4818         struct ql_adapter *qdev = netdev_priv(ndev);
4819         int err = 0;
4820
4821         if (netif_running(ndev)) {
4822                 err = qlge_open(ndev);
4823                 if (err) {
4824                         netif_err(qdev, ifup, qdev->ndev,
4825                                   "Device initialization failed after reset.\n");
4826                         return;
4827                 }
4828         } else {
4829                 netif_err(qdev, ifup, qdev->ndev,
4830                           "Device was not running prior to EEH.\n");
4831         }
4832         mod_timer(&qdev->timer, jiffies + (5*HZ));
4833         netif_device_attach(ndev);
4834 }
4835
4836 static const struct pci_error_handlers qlge_err_handler = {
4837         .error_detected = qlge_io_error_detected,
4838         .slot_reset = qlge_io_slot_reset,
4839         .resume = qlge_io_resume,
4840 };
4841
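/* Bring the adapter down, arm WOL and put the device into the requested
 * low-power state.  Also used by qlge_shutdown().
 */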
4842 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4843 {
4844         struct net_device *ndev = pci_get_drvdata(pdev);
4845         struct ql_adapter *qdev = netdev_priv(ndev);
4846         int err;
4847
4848         netif_device_detach(ndev);
4849         del_timer_sync(&qdev->timer);
4850
4851         if (netif_running(ndev)) {
4852                 err = ql_adapter_down(qdev);
4853                 if (err)
4854                         return err;
4855         }
4856
4857         ql_wol(qdev);
4858         err = pci_save_state(pdev);
4859         if (err)
4860                 return err;
4861
4862         pci_disable_device(pdev);
4863
4864         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4865
4866         return 0;
4867 }
4868
4869 #ifdef CONFIG_PM
4870 static int qlge_resume(struct pci_dev *pdev)
4871 {
4872         struct net_device *ndev = pci_get_drvdata(pdev);
4873         struct ql_adapter *qdev = netdev_priv(ndev);
4874         int err;
4875
4876         pci_set_power_state(pdev, PCI_D0);
4877         pci_restore_state(pdev);
4878         err = pci_enable_device(pdev);
4879         if (err) {
4880                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4881                 return err;
4882         }
4883         pci_set_master(pdev);
4884
4885         pci_enable_wake(pdev, PCI_D3hot, 0);
4886         pci_enable_wake(pdev, PCI_D3cold, 0);
4887
4888         if (netif_running(ndev)) {
4889                 err = ql_adapter_up(qdev);
4890                 if (err)
4891                         return err;
4892         }
4893
4894         mod_timer(&qdev->timer, jiffies + (5*HZ));
4895         netif_device_attach(ndev);
4896
4897         return 0;
4898 }
4899 #endif /* CONFIG_PM */
4900
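/* Route shutdown through the suspend path so WOL is armed if configured. */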
4901 static void qlge_shutdown(struct pci_dev *pdev)
4902 {
4903         qlge_suspend(pdev, PMSG_SUSPEND);
4904 }
4905
4906 static struct pci_driver qlge_driver = {
4907         .name = DRV_NAME,
4908         .id_table = qlge_pci_tbl,
4909         .probe = qlge_probe,
4910         .remove = qlge_remove,
4911 #ifdef CONFIG_PM
4912         .suspend = qlge_suspend,
4913         .resume = qlge_resume,
4914 #endif
4915         .shutdown = qlge_shutdown,
4916         .err_handler = &qlge_err_handler
4917 };
4918
4919 static int __init qlge_init_module(void)
4920 {
4921         return pci_register_driver(&qlge_driver);
4922 }
4923
4924 static void __exit qlge_exit(void)
4925 {
4926         pci_unregister_driver(&qlge_driver);
4927 }
4928
4929 module_init(qlge_init_module);
4930 module_exit(qlge_exit);