2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
28 #include <linux/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - Do Not allocate memory. ");
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 /* required last entry */
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
102 /* This hardware semaphore enforces exclusive access to
103 * resources shared between the NIC driver, MPI firmware,
104 * FCoE firmware and the FC driver.
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
136 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
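/* Spin on the hardware semaphore: retry ql_sem_trylock() up to a fixed
 * number of times before giving up.
 */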
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 unsigned int wait_count = 30;
148 if (!ql_sem_trylock(qdev, sem_mask))
151 } while (--wait_count);
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
161 /* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialize
163 * process, but is also used in kernel thread API such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169 int count = UDELAY_COUNT;
172 temp = ql_read32(qdev, reg);
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
180 } else if (temp & bit)
182 udelay(UDELAY_DELAY);
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 int count = UDELAY_COUNT;
199 temp = ql_read32(qdev, CFG);
204 udelay(UDELAY_DELAY);
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
237 status = ql_wait_cfg(qdev, bit);
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
252 * Wait for the bit to clear after signaling hw.
254 status = ql_wait_cfg(qdev, bit);
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
329 /* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
374 u32 upper = (addr[0] << 8) | addr[1];
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
386 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
393 ql_write32(qdev, MAC_ADDR_DATA, lower);
395 ql_wait_reg_rdy(qdev,
396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 ql_write32(qdev, MAC_ADDR_DATA, upper);
404 ql_wait_reg_rdy(qdev,
405 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
408 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
409 (index << MAC_ADDR_IDX_SHIFT) | /* index */
411 /* This field should also include the queue id
412 * and possibly the function id. Right now we hardcode
413 * the route field to NIC core.
415 cam_output = (CAM_OUT_ROUTE_NIC |
417 func << CAM_OUT_FUNC_SHIFT) |
418 (0 << CAM_OUT_CQ_ID_SHIFT));
419 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420 cam_output |= CAM_OUT_RV;
421 /* route to NIC core */
422 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
425 case MAC_ADDR_TYPE_VLAN:
427 u32 enable_bit = *((u32 *) &addr[0]);
428 /* For VLAN, the addr actually holds a bit that
429 * either enables or disables the vlan id we are
430 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about.
433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
437 enable_bit ? "to" : "from");
440 ql_wait_reg_rdy(qdev,
441 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
444 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445 (index << MAC_ADDR_IDX_SHIFT) | /* index */
447 enable_bit); /* enable/disable */
450 case MAC_ADDR_TYPE_MULTI_FLTR:
452 netif_crit(qdev, ifup, qdev->ndev,
453 "Address type %d not yet supported.\n", type);
460 /* Set or clear MAC address in hardware. We sometimes
461 * have to clear it to prevent wrong frame routing
462 * especially in a bonding environment.
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
467 char zero_mac_addr[ETH_ALEN];
471 addr = &qdev->current_mac_addr[0];
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Set Mac addr %pM\n", addr);
475 memset(zero_mac_addr, 0, ETH_ALEN);
476 addr = &zero_mac_addr[0];
477 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 "Clearing MAC address\n");
480 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
483 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
487 netif_err(qdev, ifup, qdev->ndev,
488 "Failed to init mac address.\n");
492 void ql_link_on(struct ql_adapter *qdev)
494 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495 netif_carrier_on(qdev->ndev);
496 ql_set_mac_addr(qdev, 1);
499 void ql_link_off(struct ql_adapter *qdev)
501 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502 netif_carrier_off(qdev->ndev);
503 ql_set_mac_addr(qdev, 0);
506 /* Get a specific frame routing value from the CAM.
507 * Used for debug and reg dump.
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
513 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
517 ql_write32(qdev, RT_IDX,
518 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
522 *value = ql_read32(qdev, RT_DATA);
527 /* The NIC function for this chip has 16 routing indexes. Each one can be used
528 * to route different frame types to various inbound queues. We send broadcast/
529 * multicast/error frames to the default queue for slow handling,
530 * and CAM hit/RSS frames to the fast handling queues.
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
535 int status = -EINVAL; /* Return error if no mask match. */
538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
563 value = RT_IDX_DST_CAM_Q | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 case RT_IDX_VALID: /* Promiscuous Mode frames. */
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
577 value = RT_IDX_DST_DFLT_Q | /* dest */
578 RT_IDX_TYPE_NICQ | /* type */
579 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
584 value = RT_IDX_DST_DFLT_Q | /* dest */
585 RT_IDX_TYPE_NICQ | /* type */
586 (RT_IDX_IP_CSUM_ERR_SLOT <<
587 RT_IDX_IDX_SHIFT); /* index */
590 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
592 value = RT_IDX_DST_DFLT_Q | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 RT_IDX_IDX_SHIFT); /* index */
598 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
600 value = RT_IDX_DST_DFLT_Q | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
607 value = RT_IDX_DST_DFLT_Q | /* dest */
608 RT_IDX_TYPE_NICQ | /* type */
609 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
612 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
614 value = RT_IDX_DST_DFLT_Q | /* dest */
615 RT_IDX_TYPE_NICQ | /* type */
616 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
619 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
621 value = RT_IDX_DST_RSS | /* dest */
622 RT_IDX_TYPE_NICQ | /* type */
623 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
626 case 0: /* Clear the E-bit on an entry. */
628 value = RT_IDX_DST_DFLT_Q | /* dest */
629 RT_IDX_TYPE_NICQ | /* type */
630 (index << RT_IDX_IDX_SHIFT);/* index */
634 netif_err(qdev, ifup, qdev->ndev,
635 "Mask type %d not yet supported.\n", mask);
641 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
644 value |= (enable ? RT_IDX_E : 0);
645 ql_write32(qdev, RT_IDX, value);
646 ql_write32(qdev, RT_DATA, enable ? mask : 0);
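/* Global interrupt enable/disable. As with other registers on this chip,
 * the upper 16 bits of INTR_EN act as a write mask for the corresponding
 * bits in the lower 16, so writing only the shifted copy clears the enable.
 */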
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
659 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663 * Otherwise, we may have multiple outstanding workers and don't want to
664 * enable until the last one finishes. In this case, the irq_cnt gets
665 * incremented every time we queue a worker and decremented every time
666 * a worker finishes. Once it hits zero we enable the interrupt.
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
671 unsigned long hw_flags = 0;
672 struct intr_context *ctx = qdev->intr_context + intr;
674 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 /* Always enable if we're running MSI-X with multiple vectors and
676 * it's not the default (zeroth) interrupt.
678 ql_write32(qdev, INTR_EN,
680 var = ql_read32(qdev, STS);
684 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 ql_write32(qdev, INTR_EN,
688 var = ql_read32(qdev, STS);
690 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
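/* Counterpart to ql_enable_completion_interrupt(): when not running with
 * per-vector MSI-X (where the hardware masks the vector for us), bump
 * irq_cnt and mask the completion interrupt under hw_lock.
 */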
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
697 struct intr_context *ctx;
699 /* HW disables for us if we're running MSI-X with multiple vectors and
700 * it's not the default (zeroth) interrupt.
702 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
705 ctx = qdev->intr_context + intr;
706 spin_lock(&qdev->hw_lock);
707 if (!atomic_read(&ctx->irq_cnt)) {
708 ql_write32(qdev, INTR_EN,
710 var = ql_read32(qdev, STS);
712 atomic_inc(&ctx->irq_cnt);
713 spin_unlock(&qdev->hw_lock);
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
720 for (i = 0; i < qdev->intr_count; i++) {
721 /* The enable call does an atomic_dec_and_test
722 * and enables only if the result is zero.
723 * So we precharge it here.
725 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
727 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728 ql_enable_completion_interrupt(qdev, i);
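/* Sanity-check the flash image just read: the first four bytes must match
 * the expected signature string, and the 16-bit words of the region must
 * sum (mod 2^16) to zero.
 */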
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
737 __le16 *flash = (__le16 *)&qdev->flash;
739 status = strncmp((char *)&qdev->flash, str, 4);
741 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
745 for (i = 0; i < size; i++)
746 csum += le16_to_cpu(*flash++);
749 netif_err(qdev, ifup, qdev->ndev,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
758 /* wait for reg to come ready */
759 status = ql_wait_reg_rdy(qdev,
760 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
763 /* set up for reg read */
764 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 /* wait for reg to come ready */
766 status = ql_wait_reg_rdy(qdev,
767 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
770 /* This data is stored on flash as an array of
771 * __le32. Since ql_read32() returns cpu endian
772 * we need to swap it back.
774 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
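/* Read the 8000-series flash parameter block: pick the per-function offset,
 * read it a word at a time under the flash semaphore, validate it, then copy
 * the (possibly BOFM-modified) MAC address into the net_device.
 */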
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
783 __le32 *p = (__le32 *)&qdev->flash;
787 /* Get flash offset for function and adjust
791 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
793 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
795 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
798 size = sizeof(struct flash_params_8000) / sizeof(u32);
799 for (i = 0; i < size; i++, p++) {
800 status = ql_read_flash_word(qdev, i+offset, p);
802 netif_err(qdev, ifup, qdev->ndev,
803 "Error reading flash.\n");
808 status = ql_validate_flash(qdev,
809 sizeof(struct flash_params_8000) / sizeof(u16),
812 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
817 /* Extract either manufacturer or BOFM modified
820 if (qdev->flash.flash_params_8000.data_type1 == 2)
822 qdev->flash.flash_params_8000.mac_addr1,
823 qdev->ndev->addr_len);
826 qdev->flash.flash_params_8000.mac_addr,
827 qdev->ndev->addr_len);
829 if (!is_valid_ether_addr(mac_addr)) {
830 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
835 memcpy(qdev->ndev->dev_addr,
837 qdev->ndev->addr_len);
840 ql_sem_unlock(qdev, SEM_FLASH_MASK);
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
848 __le32 *p = (__le32 *)&qdev->flash;
850 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
852 /* Second function's parameters follow the first
858 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
861 for (i = 0; i < size; i++, p++) {
862 status = ql_read_flash_word(qdev, i+offset, p);
864 netif_err(qdev, ifup, qdev->ndev,
865 "Error reading flash.\n");
871 status = ql_validate_flash(qdev,
872 sizeof(struct flash_params_8012) / sizeof(u16),
875 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
880 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
885 memcpy(qdev->ndev->dev_addr,
886 qdev->flash.flash_params_8012.mac_addr,
887 qdev->ndev->addr_len);
890 ql_sem_unlock(qdev, SEM_FLASH_MASK);
894 /* xgmac registers are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
906 /* write the data to the data reg */
907 ql_write32(qdev, XGMAC_DATA, data);
908 /* trigger the write */
909 ql_write32(qdev, XGMAC_ADDR, reg);
913 /* xgmac registers are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
920 /* wait for reg to come ready */
921 status = ql_wait_reg_rdy(qdev,
922 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
925 /* set up for reg read */
926 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 /* wait for reg to come ready */
928 status = ql_wait_reg_rdy(qdev,
929 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
933 *data = ql_read32(qdev, XGMAC_DATA);
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
945 status = ql_read_xgmac_reg(qdev, reg, &lo);
949 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
953 *data = (u64) lo | ((u64) hi << 32);
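/* On 8000-series chips the port is brought up by the MPI firmware; the
 * driver only queries the firmware version and state, then schedules a
 * worker to negotiate the TX/RX frame sizes.
 */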
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
963 * Get MPI firmware version for driver banner
966 status = ql_mb_about_fw(qdev);
969 status = ql_mb_get_fw_state(qdev);
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
978 /* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
989 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
993 netif_info(qdev, link, qdev->ndev,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
997 netif_crit(qdev, link, qdev->ndev,
998 "Port initialize timed out.\n");
1003 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1004 /* Set the core reset. */
1005 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1008 data |= GLOBAL_CFG_RESET;
1009 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1015 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1016 data |= GLOBAL_CFG_TX_STAT_EN;
1017 data |= GLOBAL_CFG_RX_STAT_EN;
1018 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1022 /* Enable transmitter, and clear its reset. */
1023 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1026 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1027 data |= TX_CFG_EN; /* Enable the transmitter. */
1028 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1032 /* Enable receiver and clear its reset. */
1033 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1036 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1037 data |= RX_CFG_EN; /* Enable the receiver. */
1038 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1042 /* Turn on jumbo. */
1044 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1048 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1055 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1061 return PAGE_SIZE << qdev->lbq_buf_order;
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1067 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068 rx_ring->lbq_curr_idx++;
1069 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070 rx_ring->lbq_curr_idx = 0;
1071 rx_ring->lbq_free_cnt++;
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 struct rx_ring *rx_ring)
1078 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1080 pci_dma_sync_single_for_cpu(qdev->pdev,
1081 dma_unmap_addr(lbq_desc, mapaddr),
1082 rx_ring->lbq_buf_size,
1083 PCI_DMA_FROMDEVICE);
1085 /* If it's the last chunk of our master page then
1088 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 == ql_lbq_block_size(qdev))
1090 pci_unmap_page(qdev->pdev,
1091 lbq_desc->p.pg_chunk.map,
1092 ql_lbq_block_size(qdev),
1093 PCI_DMA_FROMDEVICE);
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1100 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101 rx_ring->sbq_curr_idx++;
1102 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103 rx_ring->sbq_curr_idx = 0;
1104 rx_ring->sbq_free_cnt++;
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1111 rx_ring->cnsmr_idx++;
1112 rx_ring->curr_entry++;
1113 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114 rx_ring->cnsmr_idx = 0;
1115 rx_ring->curr_entry = rx_ring->cq_base;
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1121 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
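/* Hand out the next lbq_buf_size chunk of the current "master" page,
 * allocating and DMA-mapping a fresh page of order lbq_buf_order when the
 * previous one is used up. Each chunk except the last takes an extra page
 * reference so the page is only freed once every chunk has been consumed.
 */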
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 struct bq_desc *lbq_desc)
1127 if (!rx_ring->pg_chunk.page) {
1129 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1131 qdev->lbq_buf_order);
1132 if (unlikely(!rx_ring->pg_chunk.page)) {
1133 netif_err(qdev, drv, qdev->ndev,
1134 "page allocation failed.\n");
1137 rx_ring->pg_chunk.offset = 0;
1138 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 0, ql_lbq_block_size(qdev),
1140 PCI_DMA_FROMDEVICE);
1141 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 __free_pages(rx_ring->pg_chunk.page,
1143 qdev->lbq_buf_order);
1144 netif_err(qdev, drv, qdev->ndev,
1145 "PCI mapping failed.\n");
1148 rx_ring->pg_chunk.map = map;
1149 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1155 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1157 /* Adjust the master page chunk for next
1160 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 rx_ring->pg_chunk.page = NULL;
1163 lbq_desc->p.pg_chunk.last_flag = 1;
1165 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 get_page(rx_ring->pg_chunk.page);
1167 lbq_desc->p.pg_chunk.last_flag = 0;
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1174 u32 clean_idx = rx_ring->lbq_clean_idx;
1175 u32 start_idx = clean_idx;
1176 struct bq_desc *lbq_desc;
1180 while (rx_ring->lbq_free_cnt > 32) {
1181 for (i = 0; i < 16; i++) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n",
1185 lbq_desc = &rx_ring->lbq[clean_idx];
1186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n");
1192 map = lbq_desc->p.pg_chunk.map +
1193 lbq_desc->p.pg_chunk.offset;
1194 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195 dma_unmap_len_set(lbq_desc, maplen,
1196 rx_ring->lbq_buf_size);
1197 *lbq_desc->addr = cpu_to_le64(map);
1199 pci_dma_sync_single_for_device(qdev->pdev, map,
1200 rx_ring->lbq_buf_size,
1201 PCI_DMA_FROMDEVICE);
1203 if (clean_idx == rx_ring->lbq_len)
1207 rx_ring->lbq_clean_idx = clean_idx;
1208 rx_ring->lbq_prod_idx += 16;
1209 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 rx_ring->lbq_prod_idx = 0;
1211 rx_ring->lbq_free_cnt -= 16;
1214 if (start_idx != clean_idx) {
1215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring->lbq_prod_idx);
1218 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 rx_ring->lbq_prod_idx_db_reg);
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1226 u32 clean_idx = rx_ring->sbq_clean_idx;
1227 u32 start_idx = clean_idx;
1228 struct bq_desc *sbq_desc;
1232 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx];
1235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n",
1238 if (sbq_desc->p.skb == NULL) {
1239 netif_printk(qdev, rx_status, KERN_DEBUG,
1241 "sbq: getting new skb for index %d.\n",
1244 netdev_alloc_skb(qdev->ndev,
1246 if (sbq_desc->p.skb == NULL) {
1247 netif_err(qdev, probe, qdev->ndev,
1248 "Couldn't get an skb.\n");
1249 rx_ring->sbq_clean_idx = clean_idx;
1252 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253 map = pci_map_single(qdev->pdev,
1254 sbq_desc->p.skb->data,
1255 rx_ring->sbq_buf_size,
1256 PCI_DMA_FROMDEVICE);
1257 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258 netif_err(qdev, ifup, qdev->ndev,
1259 "PCI mapping failed.\n");
1260 rx_ring->sbq_clean_idx = clean_idx;
1261 dev_kfree_skb_any(sbq_desc->p.skb);
1262 sbq_desc->p.skb = NULL;
1265 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 dma_unmap_len_set(sbq_desc, maplen,
1267 rx_ring->sbq_buf_size);
1268 *sbq_desc->addr = cpu_to_le64(map);
1272 if (clean_idx == rx_ring->sbq_len)
1275 rx_ring->sbq_clean_idx = clean_idx;
1276 rx_ring->sbq_prod_idx += 16;
1277 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 rx_ring->sbq_prod_idx = 0;
1279 rx_ring->sbq_free_cnt -= 16;
1282 if (start_idx != clean_idx) {
1283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring->sbq_prod_idx);
1286 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 rx_ring->sbq_prod_idx_db_reg);
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1298 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If it's the zeroth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there are more than 6 frags,
1317 netif_printk(qdev, tx_done, KERN_DEBUG,
1319 "unmapping OAL area.\n");
1321 pci_unmap_single(qdev->pdev,
1322 dma_unmap_addr(&tx_ring_desc->map[i],
1324 dma_unmap_len(&tx_ring_desc->map[i],
1328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
1330 pci_unmap_page(qdev->pdev,
1331 dma_unmap_addr(&tx_ring_desc->map[i],
1333 dma_unmap_len(&tx_ring_desc->map[i],
1334 maplen), PCI_DMA_TODEVICE);
1340 /* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1347 int len = skb_headlen(skb);
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
1358 * Map the skb buffer first.
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
1367 return NETDEV_TX_BUSY;
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
1372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1417 tbd->addr = cpu_to_le64(map);
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
1426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1434 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1437 err = dma_mapping_error(&qdev->pdev->dev, map);
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449 skb_frag_size(frag));
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1460 * If the first frag mapping failed, then only the skb->data
1461 * area will have been mapped, so that is all that gets unmapped.
1462 * Otherwise we pass in the number of segments that mapped
1463 * successfully so they can be unmapped.
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct napi_struct *napi = &rx_ring->napi;
1480 napi->dev = qdev->ndev;
1482 skb = napi_get_frags(napi);
1484 netif_err(qdev, drv, qdev->ndev,
1485 "Couldn't get an skb, exiting.\n");
1486 rx_ring->rx_dropped++;
1487 put_page(lbq_desc->p.pg_chunk.page);
1490 prefetch(lbq_desc->p.pg_chunk.va);
1491 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 lbq_desc->p.pg_chunk.page,
1493 lbq_desc->p.pg_chunk.offset,
1497 skb->data_len += length;
1498 skb->truesize += length;
1499 skb_shinfo(skb)->nr_frags++;
1501 rx_ring->rx_packets++;
1502 rx_ring->rx_bytes += length;
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 skb_record_rx_queue(skb, rx_ring->cq_id);
1505 if (vlan_id != 0xffff)
1506 __vlan_hwaccel_put_tag(skb, vlan_id);
1507 napi_gro_frags(napi);
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1523 skb = netdev_alloc_skb(ndev, length);
1525 netif_err(qdev, drv, qdev->ndev,
1526 "Couldn't get an skb, need to unwind!.\n");
1527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1532 addr = lbq_desc->p.pg_chunk.va;
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538 netif_info(qdev, drv, qdev->ndev,
1539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540 rx_ring->rx_errors++;
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1547 if (skb->len > ndev->mtu + ETH_HLEN) {
1548 netif_err(qdev, drv, qdev->ndev,
1549 "Segment too small, dropping.\n");
1550 rx_ring->rx_dropped++;
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
1567 skb_checksum_none_assert(skb);
1569 if ((ndev->features & NETIF_F_RXCSUM) &&
1570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
1575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data;
1580 if (!(iph->frag_off &
1581 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 netif_printk(qdev, rx_status, KERN_DEBUG,
1585 "TCP checksum done!\n");
1590 skb_record_rx_queue(skb, rx_ring->cq_id);
1591 if (vlan_id != 0xffff)
1592 __vlan_hwaccel_put_tag(skb, vlan_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 napi_gro_receive(napi, skb);
1596 netif_receive_skb(skb);
1599 dev_kfree_skb_any(skb);
1600 put_page(lbq_desc->p.pg_chunk.page);
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 struct rx_ring *rx_ring,
1606 struct ib_mac_iocb_rsp *ib_mac_rsp,
1610 struct net_device *ndev = qdev->ndev;
1611 struct sk_buff *skb = NULL;
1612 struct sk_buff *new_skb = NULL;
1613 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1615 skb = sbq_desc->p.skb;
1616 /* Allocate new_skb and copy */
1617 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 if (new_skb == NULL) {
1619 netif_err(qdev, probe, qdev->ndev,
1620 "No skb available, drop the packet.\n");
1621 rx_ring->rx_dropped++;
1624 skb_reserve(new_skb, NET_IP_ALIGN);
1625 memcpy(skb_put(new_skb, length), skb->data, length);
1628 /* Frame error, so drop the packet. */
1629 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1630 netif_info(qdev, drv, qdev->ndev,
1631 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_errors++;
1637 /* loopback self test for ethtool */
1638 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639 ql_check_lb_frame(qdev, skb);
1640 dev_kfree_skb_any(skb);
1644 /* The max framesize filter on this chip is set higher than
1645 * MTU since FCoE uses 2k frames.
1647 if (skb->len > ndev->mtu + ETH_HLEN) {
1648 dev_kfree_skb_any(skb);
1649 rx_ring->rx_dropped++;
1653 prefetch(skb->data);
1655 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 "Promiscuous Packet.\n");
1669 rx_ring->rx_packets++;
1670 rx_ring->rx_bytes += skb->len;
1671 skb->protocol = eth_type_trans(skb, ndev);
1672 skb_checksum_none_assert(skb);
1674 /* If rx checksum is on, and there are no
1675 * csum or frame errors.
1677 if ((ndev->features & NETIF_F_RXCSUM) &&
1678 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1680 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682 "TCP checksum done!\n");
1683 skb->ip_summed = CHECKSUM_UNNECESSARY;
1684 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686 /* Unfragmented ipv4 UDP frame. */
1687 struct iphdr *iph = (struct iphdr *) skb->data;
1688 if (!(iph->frag_off &
1689 ntohs(IP_MF|IP_OFFSET))) {
1690 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 netif_printk(qdev, rx_status, KERN_DEBUG,
1693 "TCP checksum done!\n");
1698 skb_record_rx_queue(skb, rx_ring->cq_id);
1699 if (vlan_id != 0xffff)
1700 __vlan_hwaccel_put_tag(skb, vlan_id);
1701 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 napi_gro_receive(&rx_ring->napi, skb);
1704 netif_receive_skb(skb);
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1709 void *temp_addr = skb->data;
1711 /* Undo the skb_reserve(skb,32) we did before
1712 * giving to hardware, and realign data on
1713 * a 2-byte boundary.
1715 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717 skb_copy_to_linear_data(skb, temp_addr,
1722 * This function builds an skb for the given inbound
1723 * completion. It will be rewritten for readability in the near
1724 * future, but for now it works well.
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727 struct rx_ring *rx_ring,
1728 struct ib_mac_iocb_rsp *ib_mac_rsp)
1730 struct bq_desc *lbq_desc;
1731 struct bq_desc *sbq_desc;
1732 struct sk_buff *skb = NULL;
1733 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1737 * Handle the header buffer if present.
1739 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 "Header of %d bytes in small buffer.\n", hdr_len);
1744 * Headers fit nicely into a small buffer.
1746 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 pci_unmap_single(qdev->pdev,
1748 dma_unmap_addr(sbq_desc, mapaddr),
1749 dma_unmap_len(sbq_desc, maplen),
1750 PCI_DMA_FROMDEVICE);
1751 skb = sbq_desc->p.skb;
1752 ql_realign_skb(skb, hdr_len);
1753 skb_put(skb, hdr_len);
1754 sbq_desc->p.skb = NULL;
1758 * Handle the data buffer(s).
1760 if (unlikely(!length)) { /* Is there data too? */
1761 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762 "No Data buffer in this packet.\n");
1766 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "Headers in small, data of %d bytes in small, combine them.\n",
1772 * Data is less than small buffer size so it's
1773 * stuffed in a small buffer.
1774 * For this case we append the data
1775 * from the "data" small buffer to the "header" small
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 pci_dma_sync_single_for_cpu(qdev->pdev,
1781 (sbq_desc, mapaddr),
1784 PCI_DMA_FROMDEVICE);
1785 memcpy(skb_put(skb, length),
1786 sbq_desc->p.skb->data, length);
1787 pci_dma_sync_single_for_device(qdev->pdev,
1794 PCI_DMA_FROMDEVICE);
1796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "%d bytes in a single small buffer.\n",
1799 sbq_desc = ql_get_curr_sbuf(rx_ring);
1800 skb = sbq_desc->p.skb;
1801 ql_realign_skb(skb, length);
1802 skb_put(skb, length);
1803 pci_unmap_single(qdev->pdev,
1804 dma_unmap_addr(sbq_desc,
1806 dma_unmap_len(sbq_desc,
1808 PCI_DMA_FROMDEVICE);
1809 sbq_desc->p.skb = NULL;
1811 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 "Header in small, %d bytes in large. Chain large to small!\n",
1817 * The data is in a single large buffer. We
1818 * chain it to the header buffer's skb and let
1821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823 "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 lbq_desc->p.pg_chunk.offset, length);
1825 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826 lbq_desc->p.pg_chunk.offset,
1829 skb->data_len += length;
1830 skb->truesize += length;
1833 * The headers and data are in a single large buffer. We
1834 * copy it to a new skb and let it go. This can happen with
1835 * jumbo mtu on a non-TCP/UDP frame.
1837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838 skb = netdev_alloc_skb(qdev->ndev, length);
1840 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841 "No skb available, drop the packet.\n");
1844 pci_unmap_page(qdev->pdev,
1845 dma_unmap_addr(lbq_desc,
1847 dma_unmap_len(lbq_desc, maplen),
1848 PCI_DMA_FROMDEVICE);
1849 skb_reserve(skb, NET_IP_ALIGN);
1850 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1853 skb_fill_page_desc(skb, 0,
1854 lbq_desc->p.pg_chunk.page,
1855 lbq_desc->p.pg_chunk.offset,
1858 skb->data_len += length;
1859 skb->truesize += length;
1861 __pskb_pull_tail(skb,
1862 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863 VLAN_ETH_HLEN : ETH_HLEN);
1867 * The data is in a chain of large buffers
1868 * pointed to by a small buffer. We loop
1869 * through and chain them to our small header
1871 * frags: There are 18 max frags and our small
1872 * buffer will hold 32 of them. The thing is,
1873 * we'll use 3 max for our 9000 byte jumbo
1874 * frames. If the MTU goes up we could
1875 * eventually be in trouble.
1878 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879 pci_unmap_single(qdev->pdev,
1880 dma_unmap_addr(sbq_desc, mapaddr),
1881 dma_unmap_len(sbq_desc, maplen),
1882 PCI_DMA_FROMDEVICE);
1883 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1885 * This is a non-TCP/UDP IP frame, so
1886 * the headers aren't split into a small
1887 * buffer. We have to use the small buffer
1888 * that contains our sg list as our skb to
1889 * send upstairs. Copy the sg list here to
1890 * a local buffer and use it to find the
1893 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894 "%d bytes of headers & data in chain of large.\n",
1896 skb = sbq_desc->p.skb;
1897 sbq_desc->p.skb = NULL;
1898 skb_reserve(skb, NET_IP_ALIGN);
1900 while (length > 0) {
1901 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902 size = (length < rx_ring->lbq_buf_size) ? length :
1903 rx_ring->lbq_buf_size;
1905 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 "Adding page %d to skb for %d bytes.\n",
1908 skb_fill_page_desc(skb, i,
1909 lbq_desc->p.pg_chunk.page,
1910 lbq_desc->p.pg_chunk.offset,
1913 skb->data_len += size;
1914 skb->truesize += size;
1918 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919 VLAN_ETH_HLEN : ETH_HLEN);
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926 struct rx_ring *rx_ring,
1927 struct ib_mac_iocb_rsp *ib_mac_rsp,
1930 struct net_device *ndev = qdev->ndev;
1931 struct sk_buff *skb = NULL;
1933 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1935 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936 if (unlikely(!skb)) {
1937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "No skb available, drop packet.\n");
1939 rx_ring->rx_dropped++;
1943 /* Frame error, so drop the packet. */
1944 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945 netif_info(qdev, drv, qdev->ndev,
1946 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1947 dev_kfree_skb_any(skb);
1948 rx_ring->rx_errors++;
1952 /* The max framesize filter on this chip is set higher than
1953 * MTU since FCoE uses 2k frames.
1955 if (skb->len > ndev->mtu + ETH_HLEN) {
1956 dev_kfree_skb_any(skb);
1957 rx_ring->rx_dropped++;
1961 /* loopback self test for ethtool */
1962 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963 ql_check_lb_frame(qdev, skb);
1964 dev_kfree_skb_any(skb);
1968 prefetch(skb->data);
1970 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1971 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1978 rx_ring->rx_multicast++;
1980 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1981 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982 "Promiscuous Packet.\n");
1985 skb->protocol = eth_type_trans(skb, ndev);
1986 skb_checksum_none_assert(skb);
1988 /* If rx checksum is on, and there are no
1989 * csum or frame errors.
1991 if ((ndev->features & NETIF_F_RXCSUM) &&
1992 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1994 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1995 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996 "TCP checksum done!\n");
1997 skb->ip_summed = CHECKSUM_UNNECESSARY;
1998 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000 /* Unfragmented ipv4 UDP frame. */
2001 struct iphdr *iph = (struct iphdr *) skb->data;
2002 if (!(iph->frag_off &
2003 ntohs(IP_MF|IP_OFFSET))) {
2004 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006 "TCP checksum done!\n");
2011 rx_ring->rx_packets++;
2012 rx_ring->rx_bytes += skb->len;
2013 skb_record_rx_queue(skb, rx_ring->cq_id);
2014 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015 __vlan_hwaccel_put_tag(skb, vlan_id);
2016 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017 napi_gro_receive(&rx_ring->napi, skb);
2019 netif_receive_skb(skb);
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024 struct rx_ring *rx_ring,
2025 struct ib_mac_iocb_rsp *ib_mac_rsp)
2027 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2032 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2034 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035 /* The data and headers are split into
2038 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2040 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041 /* The data fit in a single small buffer.
2042 * Allocate a new skb, copy the data and
2043 * return the buffer to the free pool.
2045 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2047 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050 /* TCP packet in a page chunk that's been checksummed.
2051 * Tack it on to our GRO skb and let it go.
2053 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2055 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056 /* Non-TCP packet in a page chunk. Allocate an
2057 * skb, tack it on frags, and send it up.
2059 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2062 /* Non-TCP/UDP large frames that span multiple buffers
2063 * can be processed correctly by the split frame logic.
2065 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2069 return (unsigned long)length;
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 struct ob_mac_iocb_rsp *mac_rsp)
2076 struct tx_ring *tx_ring;
2077 struct tx_ring_desc *tx_ring_desc;
2079 QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 tx_ring->tx_packets++;
2085 dev_kfree_skb(tx_ring_desc->skb);
2086 tx_ring_desc->skb = NULL;
2088 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2091 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093 netif_warn(qdev, tx_done, qdev->ndev,
2094 "Total descriptor length did not match transfer length.\n");
2096 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097 netif_warn(qdev, tx_done, qdev->ndev,
2098 "Frame too short to be valid, not sent.\n");
2100 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101 netif_warn(qdev, tx_done, qdev->ndev,
2102 "Frame too long, but sent anyway.\n");
2104 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105 netif_warn(qdev, tx_done, qdev->ndev,
2106 "PCI backplane error. Frame not sent.\n");
2109 atomic_inc(&tx_ring->tx_count);
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2116 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
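/* Fire up a handler to recover from a fatal ASIC error: mask interrupts,
 * clear the "adapter up" flag and mark that we are in fatal-error recovery
 * before scheduling the reset worker.
 */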
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2122 ql_disable_interrupts(qdev);
2123 /* Clear adapter up bit to signal the recovery
2124 * process that it shouldn't kill the reset worker
2127 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128 /* Set the asic recovery bit to tell the reset process that we are
2129 * in fatal-error recovery rather than a normal close
2131 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2132 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2135 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136 struct ib_ae_iocb_rsp *ib_ae_rsp)
2138 switch (ib_ae_rsp->event) {
2139 case MGMT_ERR_EVENT:
2140 netif_err(qdev, rx_err, qdev->ndev,
2141 "Management Processor Fatal Error.\n");
2142 ql_queue_fw_error(qdev);
2145 case CAM_LOOKUP_ERR_EVENT:
2146 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2148 ql_queue_asic_error(qdev);
2151 case SOFT_ECC_ERROR_EVENT:
2152 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2153 ql_queue_asic_error(qdev);
2156 case PCI_ERR_ANON_BUF_RD:
2157 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158 "anonymous buffers from rx_ring %d.\n",
2160 ql_queue_asic_error(qdev);
2164 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2166 ql_queue_asic_error(qdev);
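/* Reap TX completions from an outbound completion ring: unmap and free each
 * completed skb, advance the consumer index, and wake the matching TX queue
 * if it was stopped and is now at least a quarter empty.
 */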
2171 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2173 struct ql_adapter *qdev = rx_ring->qdev;
2174 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2175 struct ob_mac_iocb_rsp *net_rsp = NULL;
2178 struct tx_ring *tx_ring;
2179 /* While there are entries in the completion queue. */
2180 while (prod != rx_ring->cnsmr_idx) {
2182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2184 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2186 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2188 switch (net_rsp->opcode) {
2190 case OPCODE_OB_MAC_TSO_IOCB:
2191 case OPCODE_OB_MAC_IOCB:
2192 ql_process_mac_tx_intr(qdev, net_rsp);
2195 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2200 ql_update_cq(rx_ring);
2201 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2205 ql_write_cq_idx(rx_ring);
2206 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2207 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2208 if (atomic_read(&tx_ring->queue_stopped) &&
2209 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2211 * The queue got stopped because the tx_ring was full.
2212 * Wake it up, because it's now at least 25% empty.
2214 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
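/* Service an inbound (RSS) completion ring, up to the NAPI budget,
 * dispatching each IOCB by opcode and refilling the small/large buffer
 * queues when done.
 */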
2220 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2222 struct ql_adapter *qdev = rx_ring->qdev;
2223 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 struct ql_net_rsp_iocb *net_rsp;
2227 /* While there are entries in the completion queue. */
2228 while (prod != rx_ring->cnsmr_idx) {
2230 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2232 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2234 net_rsp = rx_ring->curr_entry;
2236 switch (net_rsp->opcode) {
2237 case OPCODE_IB_MAC_IOCB:
2238 ql_process_mac_rx_intr(qdev, rx_ring,
2239 (struct ib_mac_iocb_rsp *)
2243 case OPCODE_IB_AE_IOCB:
2244 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2248 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2254 ql_update_cq(rx_ring);
2255 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2256 if (count == budget)
2259 ql_update_buffer_queues(qdev, rx_ring);
2260 ql_write_cq_idx(rx_ring);
2264 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2266 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267 struct ql_adapter *qdev = rx_ring->qdev;
2268 struct rx_ring *trx_ring;
2269 int i, work_done = 0;
2270 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2272 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2275 /* Service the TX rings first. They start
2276 * right after the RSS rings. */
2277 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278 trx_ring = &qdev->rx_ring[i];
2279 /* If this TX completion ring belongs to this vector and
2280 * it's not empty then service it.
2282 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284 trx_ring->cnsmr_idx)) {
2285 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286 "%s: Servicing TX completion ring %d.\n",
2287 __func__, trx_ring->cq_id);
2288 ql_clean_outbound_rx_ring(trx_ring);
2293 * Now service the RSS ring if it's active.
2295 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296 rx_ring->cnsmr_idx) {
2297 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298 "%s: Servicing RX completion ring %d.\n",
2299 __func__, rx_ring->cq_id);
2300 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2303 if (work_done < budget) {
2304 napi_complete(napi);
2305 ql_enable_completion_interrupt(qdev, rx_ring->irq);
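/*
 * Illustrative sketch (not part of the driver): the rx_ring[] layout the
 * poll routine above walks.  Index values are assumptions based on the
 * loop bounds used in this file.
 *
 *   rx_ring[0 .. rss_ring_count - 1]               inbound (RSS) rings
 *   rx_ring[rss_ring_count .. rx_ring_count - 1]   outbound (TX completion) rings
 */
static inline bool example_is_tx_completion_ring(int idx, int rss_ring_count)
{
        return idx >= rss_ring_count;
}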
2310 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2312 struct ql_adapter *qdev = netdev_priv(ndev);
2314 if (features & NETIF_F_HW_VLAN_RX) {
2315 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316 "Turning on VLAN in NIC_RCV_CFG.\n");
2317 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2320 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321 "Turning off VLAN in NIC_RCV_CFG.\n");
2322 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2326 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2327 netdev_features_t features)
2330 * Since there is no support for separate rx/tx vlan accel
2331 * enable/disable, make sure the tx flag is always in the same state as rx.
2333 if (features & NETIF_F_HW_VLAN_RX)
2334 features |= NETIF_F_HW_VLAN_TX;
2336 features &= ~NETIF_F_HW_VLAN_TX;
2341 static int qlge_set_features(struct net_device *ndev,
2342 netdev_features_t features)
2344 netdev_features_t changed = ndev->features ^ features;
2346 if (changed & NETIF_F_HW_VLAN_RX)
2347 qlge_vlan_mode(ndev, features);
2352 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2354 u32 enable_bit = MAC_ADDR_E;
2356 if (ql_set_mac_addr_reg
2357 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2358 netif_err(qdev, ifup, qdev->ndev,
2359 "Failed to init vlan address.\n");
2363 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2365 struct ql_adapter *qdev = netdev_priv(ndev);
2368 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2372 __qlge_vlan_rx_add_vid(qdev, vid);
2373 set_bit(vid, qdev->active_vlans);
2375 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2378 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2382 if (ql_set_mac_addr_reg
2383 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2384 netif_err(qdev, ifup, qdev->ndev,
2385 "Failed to clear vlan address.\n");
2389 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2391 struct ql_adapter *qdev = netdev_priv(ndev);
2394 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2398 __qlge_vlan_rx_kill_vid(qdev, vid);
2399 clear_bit(vid, qdev->active_vlans);
2401 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2404 static void qlge_restore_vlan(struct ql_adapter *qdev)
2409 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2413 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2414 __qlge_vlan_rx_add_vid(qdev, vid);
2416 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2419 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2420 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2422 struct rx_ring *rx_ring = dev_id;
2423 napi_schedule(&rx_ring->napi);
2427 /* This handles a fatal error, MPI activity, and the default
2428 * rx_ring in an MSI-X multiple vector environment.
2429 * In an MSI/Legacy environment it also processes the rest of the rx_rings.
2432 static irqreturn_t qlge_isr(int irq, void *dev_id)
2434 struct rx_ring *rx_ring = dev_id;
2435 struct ql_adapter *qdev = rx_ring->qdev;
2436 struct intr_context *intr_context = &qdev->intr_context[0];
2440 spin_lock(&qdev->hw_lock);
2441 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2442 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2443 "Shared Interrupt, Not ours!\n");
2444 spin_unlock(&qdev->hw_lock);
2447 spin_unlock(&qdev->hw_lock);
2449 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2452 * Check for fatal error.
2455 ql_queue_asic_error(qdev);
2456 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2457 var = ql_read32(qdev, ERR_STS);
2458 netdev_err(qdev->ndev, "Resetting chip. "
2459 "Error Status Register = 0x%x\n", var);
2464 * Check MPI processor activity.
2466 if ((var & STS_PI) &&
2467 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2469 * We've got an async event or mailbox completion.
2470 * Handle it and clear the source of the interrupt.
2472 netif_err(qdev, intr, qdev->ndev,
2473 "Got MPI processor interrupt.\n");
2474 ql_disable_completion_interrupt(qdev, intr_context->intr);
2475 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2476 queue_delayed_work_on(smp_processor_id(),
2477 qdev->workqueue, &qdev->mpi_work, 0);
2482 * Get the bit-mask that shows the active queues for this
2483 * pass. Compare it to the queues that this irq services
2484 * and call napi if there's a match.
2486 var = ql_read32(qdev, ISR1);
2487 if (var & intr_context->irq_mask) {
2488 netif_info(qdev, intr, qdev->ndev,
2489 "Waking handler for rx_ring[0].\n");
2490 ql_disable_completion_interrupt(qdev, intr_context->intr);
2491 napi_schedule(&rx_ring->napi);
2494 ql_enable_completion_interrupt(qdev, intr_context->intr);
2495 return work_done ? IRQ_HANDLED : IRQ_NONE;
2498 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2501 if (skb_is_gso(skb)) {
2503 if (skb_header_cloned(skb)) {
2504 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2509 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2510 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2511 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2512 mac_iocb_ptr->total_hdrs_len =
2513 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2514 mac_iocb_ptr->net_trans_offset =
2515 cpu_to_le16(skb_network_offset(skb) |
2516 skb_transport_offset(skb)
2517 << OB_MAC_TRANSPORT_HDR_SHIFT);
2518 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2519 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2520 if (likely(skb->protocol == htons(ETH_P_IP))) {
2521 struct iphdr *iph = ip_hdr(skb);
2523 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2524 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2528 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2529 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2530 tcp_hdr(skb)->check =
2531 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2532 &ipv6_hdr(skb)->daddr,
2540 static void ql_hw_csum_setup(struct sk_buff *skb,
2541 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2544 struct iphdr *iph = ip_hdr(skb);
2546 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2547 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2548 mac_iocb_ptr->net_trans_offset =
2549 cpu_to_le16(skb_network_offset(skb) |
2550 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2552 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2553 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2554 if (likely(iph->protocol == IPPROTO_TCP)) {
2555 check = &(tcp_hdr(skb)->check);
2556 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2557 mac_iocb_ptr->total_hdrs_len =
2558 cpu_to_le16(skb_transport_offset(skb) +
2559 (tcp_hdr(skb)->doff << 2));
2561 check = &(udp_hdr(skb)->check);
2562 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2563 mac_iocb_ptr->total_hdrs_len =
2564 cpu_to_le16(skb_transport_offset(skb) +
2565 sizeof(struct udphdr));
2567 *check = ~csum_tcpudp_magic(iph->saddr,
2568 iph->daddr, len, iph->protocol, 0);
2571 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2573 struct tx_ring_desc *tx_ring_desc;
2574 struct ob_mac_iocb_req *mac_iocb_ptr;
2575 struct ql_adapter *qdev = netdev_priv(ndev);
2577 struct tx_ring *tx_ring;
2578 u32 tx_ring_idx = (u32) skb->queue_mapping;
2580 tx_ring = &qdev->tx_ring[tx_ring_idx];
2582 if (skb_padto(skb, ETH_ZLEN))
2583 return NETDEV_TX_OK;
2585 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2586 netif_info(qdev, tx_queued, qdev->ndev,
2587 "%s: shutting down tx queue %d du to lack of resources.\n",
2588 __func__, tx_ring_idx);
2589 netif_stop_subqueue(ndev, tx_ring->wq_id);
2590 atomic_inc(&tx_ring->queue_stopped);
2591 tx_ring->tx_errors++;
2592 return NETDEV_TX_BUSY;
2594 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2595 mac_iocb_ptr = tx_ring_desc->queue_entry;
2596 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2598 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2599 mac_iocb_ptr->tid = tx_ring_desc->index;
2600 /* We use the upper 32-bits to store the tx queue for this IO.
2601 * When we get the completion we can use it to establish the context.
2603 mac_iocb_ptr->txq_idx = tx_ring_idx;
2604 tx_ring_desc->skb = skb;
2606 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2608 if (vlan_tx_tag_present(skb)) {
2609 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2610 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2611 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2612 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2614 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2616 dev_kfree_skb_any(skb);
2617 return NETDEV_TX_OK;
2618 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2619 ql_hw_csum_setup(skb,
2620 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2622 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2624 netif_err(qdev, tx_queued, qdev->ndev,
2625 "Could not map the segments.\n");
2626 tx_ring->tx_errors++;
2627 return NETDEV_TX_BUSY;
2629 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2630 tx_ring->prod_idx++;
2631 if (tx_ring->prod_idx == tx_ring->wq_len)
2632 tx_ring->prod_idx = 0;
2635 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2636 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2637 "tx queued, slot %d, len %d\n",
2638 tx_ring->prod_idx, skb->len);
2640 atomic_dec(&tx_ring->tx_count);
2641 return NETDEV_TX_OK;
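/*
 * Illustrative sketch (not part of the driver): the producer-index advance
 * performed above, pulled out into a hypothetical helper.  The index is a
 * simple modulo-wq_len counter that wraps back to zero at the end of the
 * work queue.
 */
static inline u32 example_advance_prod_idx(u32 prod_idx, u32 wq_len)
{
        return (prod_idx + 1 == wq_len) ? 0 : prod_idx + 1;
}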
2645 static void ql_free_shadow_space(struct ql_adapter *qdev)
2647 if (qdev->rx_ring_shadow_reg_area) {
2648 pci_free_consistent(qdev->pdev,
2650 qdev->rx_ring_shadow_reg_area,
2651 qdev->rx_ring_shadow_reg_dma);
2652 qdev->rx_ring_shadow_reg_area = NULL;
2654 if (qdev->tx_ring_shadow_reg_area) {
2655 pci_free_consistent(qdev->pdev,
2657 qdev->tx_ring_shadow_reg_area,
2658 qdev->tx_ring_shadow_reg_dma);
2659 qdev->tx_ring_shadow_reg_area = NULL;
2663 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2665 qdev->rx_ring_shadow_reg_area =
2666 pci_alloc_consistent(qdev->pdev,
2667 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2668 if (qdev->rx_ring_shadow_reg_area == NULL) {
2669 netif_err(qdev, ifup, qdev->ndev,
2670 "Allocation of RX shadow space failed.\n");
2673 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2674 qdev->tx_ring_shadow_reg_area =
2675 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2676 &qdev->tx_ring_shadow_reg_dma);
2677 if (qdev->tx_ring_shadow_reg_area == NULL) {
2678 netif_err(qdev, ifup, qdev->ndev,
2679 "Allocation of TX shadow space failed.\n");
2680 goto err_wqp_sh_area;
2682 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2686 pci_free_consistent(qdev->pdev,
2688 qdev->rx_ring_shadow_reg_area,
2689 qdev->rx_ring_shadow_reg_dma);
2693 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2695 struct tx_ring_desc *tx_ring_desc;
2697 struct ob_mac_iocb_req *mac_iocb_ptr;
2699 mac_iocb_ptr = tx_ring->wq_base;
2700 tx_ring_desc = tx_ring->q;
2701 for (i = 0; i < tx_ring->wq_len; i++) {
2702 tx_ring_desc->index = i;
2703 tx_ring_desc->skb = NULL;
2704 tx_ring_desc->queue_entry = mac_iocb_ptr;
2708 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2709 atomic_set(&tx_ring->queue_stopped, 0);
2712 static void ql_free_tx_resources(struct ql_adapter *qdev,
2713 struct tx_ring *tx_ring)
2715 if (tx_ring->wq_base) {
2716 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2717 tx_ring->wq_base, tx_ring->wq_base_dma);
2718 tx_ring->wq_base = NULL;
2724 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2725 struct tx_ring *tx_ring)
2728 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2729 &tx_ring->wq_base_dma);
2731 if ((tx_ring->wq_base == NULL) ||
2732 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2733 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2737 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2738 if (tx_ring->q == NULL)
2743 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2744 tx_ring->wq_base, tx_ring->wq_base_dma);
2748 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2750 struct bq_desc *lbq_desc;
2752 uint32_t curr_idx, clean_idx;
2754 curr_idx = rx_ring->lbq_curr_idx;
2755 clean_idx = rx_ring->lbq_clean_idx;
2756 while (curr_idx != clean_idx) {
2757 lbq_desc = &rx_ring->lbq[curr_idx];
2759 if (lbq_desc->p.pg_chunk.last_flag) {
2760 pci_unmap_page(qdev->pdev,
2761 lbq_desc->p.pg_chunk.map,
2762 ql_lbq_block_size(qdev),
2763 PCI_DMA_FROMDEVICE);
2764 lbq_desc->p.pg_chunk.last_flag = 0;
2767 put_page(lbq_desc->p.pg_chunk.page);
2768 lbq_desc->p.pg_chunk.page = NULL;
2770 if (++curr_idx == rx_ring->lbq_len)
2776 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2779 struct bq_desc *sbq_desc;
2781 for (i = 0; i < rx_ring->sbq_len; i++) {
2782 sbq_desc = &rx_ring->sbq[i];
2783 if (sbq_desc == NULL) {
2784 netif_err(qdev, ifup, qdev->ndev,
2785 "sbq_desc %d is NULL.\n", i);
2788 if (sbq_desc->p.skb) {
2789 pci_unmap_single(qdev->pdev,
2790 dma_unmap_addr(sbq_desc, mapaddr),
2791 dma_unmap_len(sbq_desc, maplen),
2792 PCI_DMA_FROMDEVICE);
2793 dev_kfree_skb(sbq_desc->p.skb);
2794 sbq_desc->p.skb = NULL;
2799 /* Free all large and small rx buffers associated
2800 * with the completion queues for this device.
2802 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2805 struct rx_ring *rx_ring;
2807 for (i = 0; i < qdev->rx_ring_count; i++) {
2808 rx_ring = &qdev->rx_ring[i];
2810 ql_free_lbq_buffers(qdev, rx_ring);
2812 ql_free_sbq_buffers(qdev, rx_ring);
2816 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2818 struct rx_ring *rx_ring;
2821 for (i = 0; i < qdev->rx_ring_count; i++) {
2822 rx_ring = &qdev->rx_ring[i];
2823 if (rx_ring->type != TX_Q)
2824 ql_update_buffer_queues(qdev, rx_ring);
2828 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2829 struct rx_ring *rx_ring)
2832 struct bq_desc *lbq_desc;
2833 __le64 *bq = rx_ring->lbq_base;
2835 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2836 for (i = 0; i < rx_ring->lbq_len; i++) {
2837 lbq_desc = &rx_ring->lbq[i];
2838 memset(lbq_desc, 0, sizeof(*lbq_desc));
2839 lbq_desc->index = i;
2840 lbq_desc->addr = bq;
2845 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2846 struct rx_ring *rx_ring)
2849 struct bq_desc *sbq_desc;
2850 __le64 *bq = rx_ring->sbq_base;
2852 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2853 for (i = 0; i < rx_ring->sbq_len; i++) {
2854 sbq_desc = &rx_ring->sbq[i];
2855 memset(sbq_desc, 0, sizeof(*sbq_desc));
2856 sbq_desc->index = i;
2857 sbq_desc->addr = bq;
2862 static void ql_free_rx_resources(struct ql_adapter *qdev,
2863 struct rx_ring *rx_ring)
2865 /* Free the small buffer queue. */
2866 if (rx_ring->sbq_base) {
2867 pci_free_consistent(qdev->pdev,
2869 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2870 rx_ring->sbq_base = NULL;
2873 /* Free the small buffer queue control blocks. */
2874 kfree(rx_ring->sbq);
2875 rx_ring->sbq = NULL;
2877 /* Free the large buffer queue. */
2878 if (rx_ring->lbq_base) {
2879 pci_free_consistent(qdev->pdev,
2881 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2882 rx_ring->lbq_base = NULL;
2885 /* Free the large buffer queue control blocks. */
2886 kfree(rx_ring->lbq);
2887 rx_ring->lbq = NULL;
2889 /* Free the rx queue. */
2890 if (rx_ring->cq_base) {
2891 pci_free_consistent(qdev->pdev,
2893 rx_ring->cq_base, rx_ring->cq_base_dma);
2894 rx_ring->cq_base = NULL;
2898 /* Allocate queues and buffers for this completion queue based
2899 * on the values in the parameter structure. */
2900 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2901 struct rx_ring *rx_ring)
2905 * Allocate the completion queue for this rx_ring.
2908 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2909 &rx_ring->cq_base_dma);
2911 if (rx_ring->cq_base == NULL) {
2912 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2916 if (rx_ring->sbq_len) {
2918 * Allocate small buffer queue.
2921 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2922 &rx_ring->sbq_base_dma);
2924 if (rx_ring->sbq_base == NULL) {
2925 netif_err(qdev, ifup, qdev->ndev,
2926 "Small buffer queue allocation failed.\n");
2931 * Allocate small buffer queue control blocks.
2934 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2936 if (rx_ring->sbq == NULL) {
2937 netif_err(qdev, ifup, qdev->ndev,
2938 "Small buffer queue control block allocation failed.\n");
2942 ql_init_sbq_ring(qdev, rx_ring);
2945 if (rx_ring->lbq_len) {
2947 * Allocate large buffer queue.
2950 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2951 &rx_ring->lbq_base_dma);
2953 if (rx_ring->lbq_base == NULL) {
2954 netif_err(qdev, ifup, qdev->ndev,
2955 "Large buffer queue allocation failed.\n");
2959 * Allocate large buffer queue control blocks.
2962 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2964 if (rx_ring->lbq == NULL) {
2965 netif_err(qdev, ifup, qdev->ndev,
2966 "Large buffer queue control block allocation failed.\n");
2970 ql_init_lbq_ring(qdev, rx_ring);
2976 ql_free_rx_resources(qdev, rx_ring);
2980 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2982 struct tx_ring *tx_ring;
2983 struct tx_ring_desc *tx_ring_desc;
2987 * Loop through all queues and free
2990 for (j = 0; j < qdev->tx_ring_count; j++) {
2991 tx_ring = &qdev->tx_ring[j];
2992 for (i = 0; i < tx_ring->wq_len; i++) {
2993 tx_ring_desc = &tx_ring->q[i];
2994 if (tx_ring_desc && tx_ring_desc->skb) {
2995 netif_err(qdev, ifdown, qdev->ndev,
2996 "Freeing lost SKB %p, from queue %d, index %d.\n",
2997 tx_ring_desc->skb, j,
2998 tx_ring_desc->index);
2999 ql_unmap_send(qdev, tx_ring_desc,
3000 tx_ring_desc->map_cnt);
3001 dev_kfree_skb(tx_ring_desc->skb);
3002 tx_ring_desc->skb = NULL;
3008 static void ql_free_mem_resources(struct ql_adapter *qdev)
3012 for (i = 0; i < qdev->tx_ring_count; i++)
3013 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3014 for (i = 0; i < qdev->rx_ring_count; i++)
3015 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3016 ql_free_shadow_space(qdev);
3019 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3023 /* Allocate space for our shadow registers and such. */
3024 if (ql_alloc_shadow_space(qdev))
3027 for (i = 0; i < qdev->rx_ring_count; i++) {
3028 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3029 netif_err(qdev, ifup, qdev->ndev,
3030 "RX resource allocation failed.\n");
3034 /* Allocate tx queue resources */
3035 for (i = 0; i < qdev->tx_ring_count; i++) {
3036 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3037 netif_err(qdev, ifup, qdev->ndev,
3038 "TX resource allocation failed.\n");
3045 ql_free_mem_resources(qdev);
3049 /* Set up the rx ring control block and pass it to the chip.
3050 * The control block is defined as
3051 * "Completion Queue Initialization Control Block", or cqicb.
3053 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3055 struct cqicb *cqicb = &rx_ring->cqicb;
3056 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3057 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3058 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3059 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3060 void __iomem *doorbell_area =
3061 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3065 __le64 *base_indirect_ptr;
3068 /* Set up the shadow registers for this ring. */
3069 rx_ring->prod_idx_sh_reg = shadow_reg;
3070 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3071 *rx_ring->prod_idx_sh_reg = 0;
3072 shadow_reg += sizeof(u64);
3073 shadow_reg_dma += sizeof(u64);
3074 rx_ring->lbq_base_indirect = shadow_reg;
3075 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3076 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3077 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3078 rx_ring->sbq_base_indirect = shadow_reg;
3079 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3081 /* PCI doorbell mem area + 0x00 for consumer index register */
3082 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3083 rx_ring->cnsmr_idx = 0;
3084 rx_ring->curr_entry = rx_ring->cq_base;
3086 /* PCI doorbell mem area + 0x04 for valid register */
3087 rx_ring->valid_db_reg = doorbell_area + 0x04;
3089 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3090 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3092 /* PCI doorbell mem area + 0x1c */
3093 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3095 memset((void *)cqicb, 0, sizeof(struct cqicb));
3096 cqicb->msix_vect = rx_ring->irq;
3098 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3099 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3101 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3103 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3106 * Set up the control block load flags.
3108 cqicb->flags = FLAGS_LC | /* Load queue base address */
3109 FLAGS_LV | /* Load MSI-X vector */
3110 FLAGS_LI; /* Load irq delay values */
3111 if (rx_ring->lbq_len) {
3112 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3113 tmp = (u64)rx_ring->lbq_base_dma;
3114 base_indirect_ptr = rx_ring->lbq_base_indirect;
3117 *base_indirect_ptr = cpu_to_le64(tmp);
3118 tmp += DB_PAGE_SIZE;
3119 base_indirect_ptr++;
3121 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3123 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3124 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3125 (u16) rx_ring->lbq_buf_size;
3126 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3127 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3128 (u16) rx_ring->lbq_len;
3129 cqicb->lbq_len = cpu_to_le16(bq_len);
3130 rx_ring->lbq_prod_idx = 0;
3131 rx_ring->lbq_curr_idx = 0;
3132 rx_ring->lbq_clean_idx = 0;
3133 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3135 if (rx_ring->sbq_len) {
3136 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3137 tmp = (u64)rx_ring->sbq_base_dma;
3138 base_indirect_ptr = rx_ring->sbq_base_indirect;
3141 *base_indirect_ptr = cpu_to_le64(tmp);
3142 tmp += DB_PAGE_SIZE;
3143 base_indirect_ptr++;
3145 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3147 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3148 cqicb->sbq_buf_size =
3149 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3150 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3151 (u16) rx_ring->sbq_len;
3152 cqicb->sbq_len = cpu_to_le16(bq_len);
3153 rx_ring->sbq_prod_idx = 0;
3154 rx_ring->sbq_curr_idx = 0;
3155 rx_ring->sbq_clean_idx = 0;
3156 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3158 switch (rx_ring->type) {
3160 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3161 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3164 /* Inbound completion handling rx_rings run in
3165 * separate NAPI contexts.
3167 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3169 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3170 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3173 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3176 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3177 "Initializing rx work queue.\n");
3178 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3179 CFG_LCQ, rx_ring->cq_id);
3181 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
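/*
 * Illustrative sketch (not part of the driver): the shadow-area sizing used
 * when chaining the lbq/sbq indirect pages above.  Each buffer-queue entry
 * is one __le64 DMA address, so the number of doorbell pages a queue spans
 * is a rounded-up division.  The 4K page size below is an assumption
 * standing in for DB_PAGE_SIZE.
 */
static inline u32 example_db_pages_for_bq(u32 q_len)
{
        u32 bytes = q_len * sizeof(__le64);     /* one 64-bit address per entry */

        return DIV_ROUND_UP(bytes, 4096);       /* assumed 4K doorbell page */
}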
3187 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3189 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3190 void __iomem *doorbell_area =
3191 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3192 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3193 (tx_ring->wq_id * sizeof(u64));
3194 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3195 (tx_ring->wq_id * sizeof(u64));
3199 * Assign doorbell registers for this tx_ring.
3201 /* TX PCI doorbell mem area for tx producer index */
3202 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3203 tx_ring->prod_idx = 0;
3204 /* TX PCI doorbell mem area + 0x04 */
3205 tx_ring->valid_db_reg = doorbell_area + 0x04;
3208 * Assign shadow registers for this tx_ring.
3210 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3211 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3213 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3214 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3215 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3216 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3218 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3220 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3222 ql_init_tx_ring(qdev, tx_ring);
3224 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3225 (u16) tx_ring->wq_id);
3227 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3230 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3231 "Successfully loaded WQICB.\n");
3235 static void ql_disable_msix(struct ql_adapter *qdev)
3237 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3238 pci_disable_msix(qdev->pdev);
3239 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3240 kfree(qdev->msi_x_entry);
3241 qdev->msi_x_entry = NULL;
3242 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3243 pci_disable_msi(qdev->pdev);
3244 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3248 /* We start by trying to get the number of vectors
3249 * stored in qdev->intr_count. If we don't get that
3250 * many then we reduce the count and try again.
3252 static void ql_enable_msix(struct ql_adapter *qdev)
3256 /* Get the MSIX vectors. */
3257 if (qlge_irq_type == MSIX_IRQ) {
3258 /* Try to alloc space for the msix struct,
3259 * if it fails then go to MSI/legacy.
3261 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3262 sizeof(struct msix_entry),
3264 if (!qdev->msi_x_entry) {
3265 qlge_irq_type = MSI_IRQ;
3269 for (i = 0; i < qdev->intr_count; i++)
3270 qdev->msi_x_entry[i].entry = i;
3272 /* Loop to get our vectors. We start with
3273 * what we want and settle for what we get.
3276 err = pci_enable_msix(qdev->pdev,
3277 qdev->msi_x_entry, qdev->intr_count);
3279 qdev->intr_count = err;
3283 kfree(qdev->msi_x_entry);
3284 qdev->msi_x_entry = NULL;
3285 netif_warn(qdev, ifup, qdev->ndev,
3286 "MSI-X Enable failed, trying MSI.\n");
3287 qdev->intr_count = 1;
3288 qlge_irq_type = MSI_IRQ;
3289 } else if (err == 0) {
3290 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3291 netif_info(qdev, ifup, qdev->ndev,
3292 "MSI-X Enabled, got %d vectors.\n",
3298 qdev->intr_count = 1;
3299 if (qlge_irq_type == MSI_IRQ) {
3300 if (!pci_enable_msi(qdev->pdev)) {
3301 set_bit(QL_MSI_ENABLED, &qdev->flags);
3302 netif_info(qdev, ifup, qdev->ndev,
3303 "Running with MSI interrupts.\n");
3307 qlge_irq_type = LEG_IRQ;
3308 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3309 "Running with legacy interrupts.\n");
3312 /* Each vector services 1 RSS ring and 1 or more
3313 * TX completion rings. This function loops through
3314 * the TX completion rings and assigns the vector that
3315 * will service it. An example would be if there are
3316 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3317 * This would mean that vector 0 would service RSS ring 0
3318 * and TX completion rings 0,1,2 and 3. Vector 1 would
3319 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3321 static void ql_set_tx_vect(struct ql_adapter *qdev)
3324 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3326 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327 /* Assign irq vectors to TX rx_rings.*/
3328 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3329 i < qdev->rx_ring_count; i++) {
3330 if (j == tx_rings_per_vector) {
3334 qdev->rx_ring[i].irq = vect;
3338 /* For a single vector all rings have an irq index of 0.
3341 for (i = 0; i < qdev->rx_ring_count; i++)
3342 qdev->rx_ring[i].irq = 0;
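/*
 * Illustrative sketch (not part of the driver): the mapping described in
 * the comment above as a single computation.  Names are hypothetical.
 * With 2 vectors and 8 TX completion rings, tx_rings_per_vector is 4, so
 * TX completion ring 5 is serviced by vector 5 / 4 = 1.
 */
static inline u32 example_tx_ring_to_vector(u32 tx_ring_idx,
                                            u32 tx_ring_count, u32 intr_count)
{
        u32 tx_rings_per_vector = tx_ring_count / intr_count;

        return tx_ring_idx / tx_rings_per_vector;
}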
3346 /* Set the interrupt mask for this vector. Each vector
3347 * will service 1 RSS ring and 1 or more TX completion
3348 * rings. This function sets up a bit mask per vector
3349 * that indicates which rings it services.
3351 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3353 int j, vect = ctx->intr;
3354 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3356 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3357 /* Add the RSS ring serviced by this vector
3360 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3361 /* Add the TX ring(s) serviced by this vector
3363 for (j = 0; j < tx_rings_per_vector; j++) {
3365 (1 << qdev->rx_ring[qdev->rss_ring_count +
3366 (vect * tx_rings_per_vector) + j].cq_id);
3369 /* For a single vector we just OR each queue's cq_id bit into the one mask.
3372 for (j = 0; j < qdev->rx_ring_count; j++)
3373 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
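/*
 * Illustrative sketch (not part of the driver): the per-vector mask built
 * above, assuming each ring's cq_id equals its index in rx_ring[].  For a
 * hypothetical layout of 2 vectors and 8 TX completion rings
 * (rss_ring_count = 2, tx_rings_per_vector = 4), vector 0 gets RSS cq 0
 * plus TX cqs 2..5, i.e. a mask of 0x3d.
 */
static inline u32 example_vector_irq_mask(u32 vect, u32 rss_ring_count,
                                          u32 tx_rings_per_vector)
{
        u32 mask = 1 << vect;   /* the RSS ring this vector services */
        u32 j;

        for (j = 0; j < tx_rings_per_vector; j++)
                mask |= 1 << (rss_ring_count + vect * tx_rings_per_vector + j);

        return mask;
}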
3378 * Here we build the intr_context structures based on
3379 * our rx_ring count and intr vector count.
3380 * The intr_context structure is used to hook each vector
3381 * to possibly different handlers.
3383 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3386 struct intr_context *intr_context = &qdev->intr_context[0];
3388 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3389 /* Each rx_ring has its
3390 * own intr_context since we have separate
3391 * vectors for each queue.
3393 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3394 qdev->rx_ring[i].irq = i;
3395 intr_context->intr = i;
3396 intr_context->qdev = qdev;
3397 /* Set up this vector's bit-mask that indicates
3398 * which queues it services.
3400 ql_set_irq_mask(qdev, intr_context);
3402 * We set up each vector's enable/disable/read bits so
3403 * there are no bit/mask calculations in the critical path.
3405 intr_context->intr_en_mask =
3406 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3407 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3409 intr_context->intr_dis_mask =
3410 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3413 intr_context->intr_read_mask =
3414 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3415 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3418 /* The first vector/queue handles
3419 * broadcast/multicast, fatal errors,
3420 * and firmware events. This is in addition
3421 * to normal inbound NAPI processing.
3423 intr_context->handler = qlge_isr;
3424 sprintf(intr_context->name, "%s-rx-%d",
3425 qdev->ndev->name, i);
3428 * Inbound queues handle unicast frames only.
3430 intr_context->handler = qlge_msix_rx_isr;
3431 sprintf(intr_context->name, "%s-rx-%d",
3432 qdev->ndev->name, i);
3437 * All rx_rings use the same intr_context since
3438 * there is only one vector.
3440 intr_context->intr = 0;
3441 intr_context->qdev = qdev;
3443 * We set up each vector's enable/disable/read bits so
3444 * there are no bit/mask calculations in the critical path.
3446 intr_context->intr_en_mask =
3447 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3448 intr_context->intr_dis_mask =
3449 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3450 INTR_EN_TYPE_DISABLE;
3451 intr_context->intr_read_mask =
3452 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3454 * Single interrupt means one handler for all rings.
3456 intr_context->handler = qlge_isr;
3457 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3458 /* Set up this vector's bit-mask that indicates
3459 * which queues it services. In this case there is
3460 * a single vector so it will service all RSS and
3461 * TX completion rings.
3463 ql_set_irq_mask(qdev, intr_context);
3465 /* Tell the TX completion rings which MSIx vector
3466 * they will be using.
3468 ql_set_tx_vect(qdev);
3471 static void ql_free_irq(struct ql_adapter *qdev)
3474 struct intr_context *intr_context = &qdev->intr_context[0];
3476 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3477 if (intr_context->hooked) {
3478 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3479 free_irq(qdev->msi_x_entry[i].vector,
3481 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3482 "freeing msix interrupt %d.\n", i);
3484 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3485 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3486 "freeing msi interrupt %d.\n", i);
3490 ql_disable_msix(qdev);
3493 static int ql_request_irq(struct ql_adapter *qdev)
3497 struct pci_dev *pdev = qdev->pdev;
3498 struct intr_context *intr_context = &qdev->intr_context[0];
3500 ql_resolve_queues_to_irqs(qdev);
3502 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3503 atomic_set(&intr_context->irq_cnt, 0);
3504 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3505 status = request_irq(qdev->msi_x_entry[i].vector,
3506 intr_context->handler,
3511 netif_err(qdev, ifup, qdev->ndev,
3512 "Failed request for MSIX interrupt %d.\n",
3516 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517 "Hooked intr %d, queue type %s, with name %s.\n",
3519 qdev->rx_ring[i].type == DEFAULT_Q ?
3521 qdev->rx_ring[i].type == TX_Q ?
3523 qdev->rx_ring[i].type == RX_Q ?
3525 intr_context->name);
3528 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3529 "trying msi or legacy interrupts.\n");
3530 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3531 "%s: irq = %d.\n", __func__, pdev->irq);
3532 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3533 "%s: context->name = %s.\n", __func__,
3534 intr_context->name);
3535 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3536 "%s: dev_id = 0x%p.\n", __func__,
3539 request_irq(pdev->irq, qlge_isr,
3540 test_bit(QL_MSI_ENABLED,
3542 flags) ? 0 : IRQF_SHARED,
3543 intr_context->name, &qdev->rx_ring[0]);
3547 netif_err(qdev, ifup, qdev->ndev,
3548 "Hooked intr %d, queue type %s, with name %s.\n",
3550 qdev->rx_ring[0].type == DEFAULT_Q ?
3552 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3553 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3554 intr_context->name);
3556 intr_context->hooked = 1;
3560 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3565 static int ql_start_rss(struct ql_adapter *qdev)
3567 static const u8 init_hash_seed[] = {
3568 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3569 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3570 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3571 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3572 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3574 struct ricb *ricb = &qdev->ricb;
3577 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3579 memset((void *)ricb, 0, sizeof(*ricb));
3581 ricb->base_cq = RSS_L4K;
3583 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3584 ricb->mask = cpu_to_le16((u16)(0x3ff));
3587 * Fill out the Indirection Table.
3589 for (i = 0; i < 1024; i++)
3590 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3592 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3593 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3595 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3597 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3599 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3602 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3603 "Successfully loaded RICB.\n");
3607 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3611 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3614 /* Clear all the entries in the routing table. */
3615 for (i = 0; i < 16; i++) {
3616 status = ql_set_routing_reg(qdev, i, 0, 0);
3618 netif_err(qdev, ifup, qdev->ndev,
3619 "Failed to init routing register for CAM packets.\n");
3623 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3627 /* Initialize the frame-to-queue routing. */
3628 static int ql_route_initialize(struct ql_adapter *qdev)
3632 /* Clear all the entries in the routing table. */
3633 status = ql_clear_routing_entries(qdev);
3637 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3641 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3642 RT_IDX_IP_CSUM_ERR, 1);
3644 netif_err(qdev, ifup, qdev->ndev,
3645 "Failed to init routing register "
3646 "for IP CSUM error packets.\n");
3649 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3650 RT_IDX_TU_CSUM_ERR, 1);
3652 netif_err(qdev, ifup, qdev->ndev,
3653 "Failed to init routing register "
3654 "for TCP/UDP CSUM error packets.\n");
3657 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3659 netif_err(qdev, ifup, qdev->ndev,
3660 "Failed to init routing register for broadcast packets.\n");
3663 /* If we have more than one inbound queue, then turn on RSS in the routing block.
3666 if (qdev->rss_ring_count > 1) {
3667 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3668 RT_IDX_RSS_MATCH, 1);
3670 netif_err(qdev, ifup, qdev->ndev,
3671 "Failed to init routing register for MATCH RSS packets.\n");
3676 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3679 netif_err(qdev, ifup, qdev->ndev,
3680 "Failed to init routing register for CAM packets.\n");
3682 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3686 int ql_cam_route_initialize(struct ql_adapter *qdev)
3690 /* Check if the link is up and use that to
3691 * determine whether we are setting or clearing
3692 * the MAC address in the CAM.
3694 set = ql_read32(qdev, STS);
3695 set &= qdev->port_link_up;
3696 status = ql_set_mac_addr(qdev, set);
3698 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3702 status = ql_route_initialize(qdev);
3704 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3709 static int ql_adapter_initialize(struct ql_adapter *qdev)
3716 * Set up the System register to halt on errors.
3718 value = SYS_EFE | SYS_FAE;
3720 ql_write32(qdev, SYS, mask | value);
3722 /* Set the default queue, and VLAN behavior. */
3723 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3724 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3725 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3727 /* Set the MPI interrupt to enabled. */
3728 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3730 /* Enable the function, set pagesize, enable error checking. */
3731 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3732 FSC_EC | FSC_VM_PAGE_4K;
3733 value |= SPLT_SETTING;
3735 /* Set/clear header splitting. */
3736 mask = FSC_VM_PAGESIZE_MASK |
3737 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3738 ql_write32(qdev, FSC, mask | value);
3740 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3742 /* Set RX packet routing to use port/pci function on which the
3743 * packet arrived, in addition to the usual frame routing.
3744 * This is helpful on bonding where both interfaces can have
3745 * the same MAC address.
3747 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3748 /* Reroute all packets to our interface.
3749 * They may have been routed to MPI firmware due to WOL.
3752 value = ql_read32(qdev, MGMT_RCV_CFG);
3753 value &= ~MGMT_RCV_CFG_RM;
3756 /* Sticky reg needs clearing due to WOL. */
3757 ql_write32(qdev, MGMT_RCV_CFG, mask);
3758 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3760 /* Default WOL is enabled on Mezz cards */
3761 if (qdev->pdev->subsystem_device == 0x0068 ||
3762 qdev->pdev->subsystem_device == 0x0180)
3763 qdev->wol = WAKE_MAGIC;
3765 /* Start up the rx queues. */
3766 for (i = 0; i < qdev->rx_ring_count; i++) {
3767 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3769 netif_err(qdev, ifup, qdev->ndev,
3770 "Failed to start rx ring[%d].\n", i);
3775 /* If there is more than one inbound completion queue
3776 * then download a RICB to configure RSS.
3778 if (qdev->rss_ring_count > 1) {
3779 status = ql_start_rss(qdev);
3781 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3786 /* Start up the tx queues. */
3787 for (i = 0; i < qdev->tx_ring_count; i++) {
3788 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3790 netif_err(qdev, ifup, qdev->ndev,
3791 "Failed to start tx ring[%d].\n", i);
3796 /* Initialize the port and set the max framesize. */
3797 status = qdev->nic_ops->port_initialize(qdev);
3799 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3801 /* Set up the MAC address and frame routing filter. */
3802 status = ql_cam_route_initialize(qdev);
3804 netif_err(qdev, ifup, qdev->ndev,
3805 "Failed to init CAM/Routing tables.\n");
3809 /* Start NAPI for the RSS queues. */
3810 for (i = 0; i < qdev->rss_ring_count; i++) {
3811 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3812 "Enabling NAPI for rx_ring[%d].\n", i);
3813 napi_enable(&qdev->rx_ring[i].napi);
3819 /* Issue soft reset to chip. */
3820 static int ql_adapter_reset(struct ql_adapter *qdev)
3824 unsigned long end_jiffies;
3826 /* Clear all the entries in the routing table. */
3827 status = ql_clear_routing_entries(qdev);
3829 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3833 end_jiffies = jiffies +
3834 max((unsigned long)1, usecs_to_jiffies(30));
3836 /* If the recovery bit is set, skip the mailbox command and
3837 * clear the bit; otherwise we are in the normal reset process.
3839 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3840 /* Stop management traffic. */
3841 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3843 /* Wait for the NIC and MGMNT FIFOs to empty. */
3844 ql_wait_fifo_empty(qdev);
3846 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3848 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3851 value = ql_read32(qdev, RST_FO);
3852 if ((value & RST_FO_FR) == 0)
3855 } while (time_before(jiffies, end_jiffies));
3857 if (value & RST_FO_FR) {
3858 netif_err(qdev, ifdown, qdev->ndev,
3859 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3860 status = -ETIMEDOUT;
3863 /* Resume management traffic. */
3864 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
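/*
 * Illustrative sketch (not part of the driver): the bounded poll used for
 * the reset bit above, written as a generic helper.  The read callback,
 * names and parameters are hypothetical; the pattern is a jiffies deadline
 * guarded by time_before().
 */
static int example_poll_until_clear(u32 (*read_reg)(void *ctx), void *ctx,
                                    u32 bit, unsigned long timeout_us)
{
        unsigned long end = jiffies + max((unsigned long)1,
                                          usecs_to_jiffies(timeout_us));

        do {
                if (!(read_reg(ctx) & bit))
                        return 0;
                cpu_relax();
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}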
3868 static void ql_display_dev_info(struct net_device *ndev)
3870 struct ql_adapter *qdev = netdev_priv(ndev);
3872 netif_info(qdev, probe, qdev->ndev,
3873 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3874 "XG Roll = %d, XG Rev = %d.\n",
3877 qdev->chip_rev_id & 0x0000000f,
3878 qdev->chip_rev_id >> 4 & 0x0000000f,
3879 qdev->chip_rev_id >> 8 & 0x0000000f,
3880 qdev->chip_rev_id >> 12 & 0x0000000f);
3881 netif_info(qdev, probe, qdev->ndev,
3882 "MAC address %pM\n", ndev->dev_addr);
3885 static int ql_wol(struct ql_adapter *qdev)
3888 u32 wol = MB_WOL_DISABLE;
3890 /* The CAM is still intact after a reset, but if we
3891 * are doing WOL, then we may need to program the
3892 * routing regs. We would also need to issue the mailbox
3893 * commands to instruct the MPI what to do per the ethtool settings.
3897 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3898 WAKE_MCAST | WAKE_BCAST)) {
3899 netif_err(qdev, ifdown, qdev->ndev,
3900 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3905 if (qdev->wol & WAKE_MAGIC) {
3906 status = ql_mb_wol_set_magic(qdev, 1);
3908 netif_err(qdev, ifdown, qdev->ndev,
3909 "Failed to set magic packet on %s.\n",
3913 netif_info(qdev, drv, qdev->ndev,
3914 "Enabled magic packet successfully on %s.\n",
3917 wol |= MB_WOL_MAGIC_PKT;
3921 wol |= MB_WOL_MODE_ON;
3922 status = ql_mb_wol_mode(qdev, wol);
3923 netif_err(qdev, drv, qdev->ndev,
3924 "WOL %s (wol code 0x%x) on %s\n",
3925 (status == 0) ? "Successfully set" : "Failed",
3926 wol, qdev->ndev->name);
3932 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3935 /* Don't kill the reset worker thread if we
3936 * are in the process of recovery.
3938 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3939 cancel_delayed_work_sync(&qdev->asic_reset_work);
3940 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3941 cancel_delayed_work_sync(&qdev->mpi_work);
3942 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3943 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3944 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3947 static int ql_adapter_down(struct ql_adapter *qdev)
3953 ql_cancel_all_work_sync(qdev);
3955 for (i = 0; i < qdev->rss_ring_count; i++)
3956 napi_disable(&qdev->rx_ring[i].napi);
3958 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3960 ql_disable_interrupts(qdev);
3962 ql_tx_ring_clean(qdev);
3964 /* Call netif_napi_del() from common point.
3966 for (i = 0; i < qdev->rss_ring_count; i++)
3967 netif_napi_del(&qdev->rx_ring[i].napi);
3969 status = ql_adapter_reset(qdev);
3971 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3973 ql_free_rx_buffers(qdev);
3978 static int ql_adapter_up(struct ql_adapter *qdev)
3982 err = ql_adapter_initialize(qdev);
3984 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3987 set_bit(QL_ADAPTER_UP, &qdev->flags);
3988 ql_alloc_rx_buffers(qdev);
3989 /* If the port is initialized and the
3990 * link is up then turn on the carrier.
3992 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3993 (ql_read32(qdev, STS) & qdev->port_link_up))
3995 /* Restore rx mode. */
3996 clear_bit(QL_ALLMULTI, &qdev->flags);
3997 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3998 qlge_set_multicast_list(qdev->ndev);
4000 /* Restore vlan setting. */
4001 qlge_restore_vlan(qdev);
4003 ql_enable_interrupts(qdev);
4004 ql_enable_all_completion_interrupts(qdev);
4005 netif_tx_start_all_queues(qdev->ndev);
4009 ql_adapter_reset(qdev);
4013 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4015 ql_free_mem_resources(qdev);
4019 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4023 if (ql_alloc_mem_resources(qdev)) {
4024 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4027 status = ql_request_irq(qdev);
4031 static int qlge_close(struct net_device *ndev)
4033 struct ql_adapter *qdev = netdev_priv(ndev);
4035 /* If we hit the pci_channel_io_perm_failure
4036 * condition, then we have already
4037 * brought the adapter down.
4039 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4040 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4041 clear_bit(QL_EEH_FATAL, &qdev->flags);
4046 * Wait for device to recover from a reset.
4047 * (Rarely happens, but possible.)
4049 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4051 ql_adapter_down(qdev);
4052 ql_release_adapter_resources(qdev);
4056 static int ql_configure_rings(struct ql_adapter *qdev)
4059 struct rx_ring *rx_ring;
4060 struct tx_ring *tx_ring;
4061 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4062 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4063 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4065 qdev->lbq_buf_order = get_order(lbq_buf_len);
4067 /* In a perfect world we have one RSS ring for each CPU
4068 * and each has its own vector. To do that we ask for
4069 * cpu_cnt vectors. ql_enable_msix() will adjust the
4070 * vector count to what we actually get. We then
4071 * allocate an RSS ring for each.
4072 * Essentially, we are doing min(cpu_count, msix_vector_count).
4074 qdev->intr_count = cpu_cnt;
4075 ql_enable_msix(qdev);
4076 /* Adjust the RSS ring count to the actual vector count. */
4077 qdev->rss_ring_count = qdev->intr_count;
4078 qdev->tx_ring_count = cpu_cnt;
4079 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4081 for (i = 0; i < qdev->tx_ring_count; i++) {
4082 tx_ring = &qdev->tx_ring[i];
4083 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4084 tx_ring->qdev = qdev;
4086 tx_ring->wq_len = qdev->tx_ring_size;
4088 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4091 * The completion queue IDs for the tx rings start
4092 * immediately after the rss rings.
4094 tx_ring->cq_id = qdev->rss_ring_count + i;
4097 for (i = 0; i < qdev->rx_ring_count; i++) {
4098 rx_ring = &qdev->rx_ring[i];
4099 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4100 rx_ring->qdev = qdev;
4102 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4103 if (i < qdev->rss_ring_count) {
4105 * Inbound (RSS) queues.
4107 rx_ring->cq_len = qdev->rx_ring_size;
4109 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4110 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4112 rx_ring->lbq_len * sizeof(__le64);
4113 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4114 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4115 "lbq_buf_size %d, order = %d\n",
4116 rx_ring->lbq_buf_size,
4117 qdev->lbq_buf_order);
4118 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4120 rx_ring->sbq_len * sizeof(__le64);
4121 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4122 rx_ring->type = RX_Q;
4125 * Outbound queue handles outbound completions only.
4127 /* outbound cq is same size as tx_ring it services. */
4128 rx_ring->cq_len = qdev->tx_ring_size;
4130 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4131 rx_ring->lbq_len = 0;
4132 rx_ring->lbq_size = 0;
4133 rx_ring->lbq_buf_size = 0;
4134 rx_ring->sbq_len = 0;
4135 rx_ring->sbq_size = 0;
4136 rx_ring->sbq_buf_size = 0;
4137 rx_ring->type = TX_Q;
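/*
 * Illustrative sketch (not part of the driver): the ring arithmetic the
 * function above performs, for a hypothetical 4-CPU machine that is
 * granted all of its MSI-X vectors.  Names below are assumptions.
 *
 *   rss_ring_count = intr_count = 4     inbound RSS rings, cq_ids 0..3
 *   tx_ring_count  = cpu_cnt    = 4     TX work queues
 *   rx_ring_count  = 4 + 4      = 8     TX completion rings use cq_ids 4..7
 */
static inline u32 example_total_completion_rings(u32 cpu_cnt, u32 msix_vectors)
{
        u32 rss_rings = min(cpu_cnt, msix_vectors);     /* one RSS ring per vector */

        return rss_rings + cpu_cnt;     /* plus one TX completion ring per CPU */
}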
4143 static int qlge_open(struct net_device *ndev)
4146 struct ql_adapter *qdev = netdev_priv(ndev);
4148 err = ql_adapter_reset(qdev);
4152 err = ql_configure_rings(qdev);
4156 err = ql_get_adapter_resources(qdev);
4160 err = ql_adapter_up(qdev);
4167 ql_release_adapter_resources(qdev);
4171 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4173 struct rx_ring *rx_ring;
4177 /* Wait for an outstanding reset to complete. */
4178 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4180 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4181 netif_err(qdev, ifup, qdev->ndev,
4182 "Waiting for adapter UP...\n");
4187 netif_err(qdev, ifup, qdev->ndev,
4188 "Timed out waiting for adapter UP\n");
4193 status = ql_adapter_down(qdev);
4197 /* Get the new rx buffer size. */
4198 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4199 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4200 qdev->lbq_buf_order = get_order(lbq_buf_len);
4202 for (i = 0; i < qdev->rss_ring_count; i++) {
4203 rx_ring = &qdev->rx_ring[i];
4204 /* Set the new size. */
4205 rx_ring->lbq_buf_size = lbq_buf_len;
4208 status = ql_adapter_up(qdev);
4214 netif_alert(qdev, ifup, qdev->ndev,
4215 "Driver up/down cycle failed, closing device.\n");
4216 set_bit(QL_ADAPTER_UP, &qdev->flags);
4217 dev_close(qdev->ndev);
4221 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4223 struct ql_adapter *qdev = netdev_priv(ndev);
4226 if (ndev->mtu == 1500 && new_mtu == 9000) {
4227 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4228 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4229 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4233 queue_delayed_work(qdev->workqueue,
4234 &qdev->mpi_port_cfg_work, 3*HZ);
4236 ndev->mtu = new_mtu;
4238 if (!netif_running(qdev->ndev)) {
4242 status = ql_change_rx_buffers(qdev);
4244 netif_err(qdev, ifup, qdev->ndev,
4245 "Changing MTU failed.\n");
4251 static struct net_device_stats *qlge_get_stats(struct net_device
4254 struct ql_adapter *qdev = netdev_priv(ndev);
4255 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4256 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4257 unsigned long pkts, mcast, dropped, errors, bytes;
4261 pkts = mcast = dropped = errors = bytes = 0;
4262 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4263 pkts += rx_ring->rx_packets;
4264 bytes += rx_ring->rx_bytes;
4265 dropped += rx_ring->rx_dropped;
4266 errors += rx_ring->rx_errors;
4267 mcast += rx_ring->rx_multicast;
4269 ndev->stats.rx_packets = pkts;
4270 ndev->stats.rx_bytes = bytes;
4271 ndev->stats.rx_dropped = dropped;
4272 ndev->stats.rx_errors = errors;
4273 ndev->stats.multicast = mcast;
4276 pkts = errors = bytes = 0;
4277 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4278 pkts += tx_ring->tx_packets;
4279 bytes += tx_ring->tx_bytes;
4280 errors += tx_ring->tx_errors;
4282 ndev->stats.tx_packets = pkts;
4283 ndev->stats.tx_bytes = bytes;
4284 ndev->stats.tx_errors = errors;
4285 return &ndev->stats;
4288 static void qlge_set_multicast_list(struct net_device *ndev)
4290 struct ql_adapter *qdev = netdev_priv(ndev);
4291 struct netdev_hw_addr *ha;
4294 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4298 * Set or clear promiscuous mode if a
4299 * transition is taking place.
4301 if (ndev->flags & IFF_PROMISC) {
4302 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4303 if (ql_set_routing_reg
4304 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4305 netif_err(qdev, hw, qdev->ndev,
4306 "Failed to set promiscuous mode.\n");
4308 set_bit(QL_PROMISCUOUS, &qdev->flags);
4312 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4313 if (ql_set_routing_reg
4314 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4315 netif_err(qdev, hw, qdev->ndev,
4316 "Failed to clear promiscuous mode.\n");
4318 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4324 * Set or clear all multicast mode if a
4325 * transition is taking place.
4327 if ((ndev->flags & IFF_ALLMULTI) ||
4328 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4329 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4330 if (ql_set_routing_reg
4331 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4332 netif_err(qdev, hw, qdev->ndev,
4333 "Failed to set all-multi mode.\n");
4335 set_bit(QL_ALLMULTI, &qdev->flags);
4339 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4340 if (ql_set_routing_reg
4341 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4342 netif_err(qdev, hw, qdev->ndev,
4343 "Failed to clear all-multi mode.\n");
4345 clear_bit(QL_ALLMULTI, &qdev->flags);
4350 if (!netdev_mc_empty(ndev)) {
4351 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4355 netdev_for_each_mc_addr(ha, ndev) {
4356 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4357 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4358 netif_err(qdev, hw, qdev->ndev,
4359 "Failed to loadmulticast address.\n");
4360 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4365 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4366 if (ql_set_routing_reg
4367 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4368 netif_err(qdev, hw, qdev->ndev,
4369 "Failed to set multicast match mode.\n");
4371 set_bit(QL_ALLMULTI, &qdev->flags);
4375 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4378 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4380 struct ql_adapter *qdev = netdev_priv(ndev);
4381 struct sockaddr *addr = p;
4384 if (!is_valid_ether_addr(addr->sa_data))
4385 return -EADDRNOTAVAIL;
4386 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4387 /* Update local copy of current mac address. */
4388 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4390 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4393 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4394 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4396 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4397 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4401 static void qlge_tx_timeout(struct net_device *ndev)
4403 struct ql_adapter *qdev = netdev_priv(ndev);
4404 ql_queue_asic_error(qdev);
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;
	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;
	return status;
}

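/* Derive per-function resources (port index, XGMAC semaphore mask, link
 * status bits, mailbox addresses and nic_ops) from the function ID in
 * the STS register.
 */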
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

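/* Undo everything ql_init_device() set up: workqueue, register and
 * doorbell mappings, the coredump buffer and the PCI regions.
 */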
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

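/* One-time PCI and adapter bring-up shared with probe: enable the device,
 * map BAR1 (control registers) and BAR3 (doorbells), validate the flash,
 * and seed the default ring and interrupt-coalescing parameters.
 */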
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_fix_features = qlge_fix_features,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};

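/* Heartbeat timer: periodically read a hardware register so a dead PCI
 * bus is noticed (and reported via EEH) even when the device is idle.
 */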
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

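/* Non-static wrappers used by the loopback self-test code elsewhere in
 * the driver.
 */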
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

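/* Called when traffic may flow again after a successful slot reset:
 * reopen the interface if it was running and re-arm the heartbeat timer.
 */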
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

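/* Legacy PCI power-management hooks: take the adapter down for suspend
 * and bring it back up on resume.
 */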
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);
	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);