2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
53 static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER | */
60 /* NETIF_MSG_TX_QUEUED | */
61 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65 static int debug = -1; /* defaults above */
66 module_param(debug, int, 0664);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
72 static int qlge_irq_type = MSIX_IRQ;
73 module_param(qlge_irq_type, int, 0664);
74 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 static int qlge_mpi_coredump;
77 module_param(qlge_mpi_coredump, int, 0);
78 MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
88 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91 /* required last entry */
95 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97 static int ql_wol(struct ql_adapter *qdev);
98 static void qlge_set_multicast_list(struct net_device *ndev);
100 /* This hardware semaphore causes exclusive access to
101 * resources shared between the NIC driver, MPI firmware,
102 * FCOE firmware and the FC driver.
104 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
109 case SEM_XGMAC0_MASK:
110 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
112 case SEM_XGMAC1_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 sem_bits = SEM_SET << SEM_ICB_SHIFT;
118 case SEM_MAC_ADDR_MASK:
119 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
127 case SEM_RT_IDX_MASK:
128 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
130 case SEM_PROC_REG_MASK:
131 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
138 ql_write32(qdev, SEM, sem_bits | sem_mask);
139 return !(ql_read32(qdev, SEM) & sem_bits);
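/* Retry ql_sem_trylock() up to 30 times until the semaphore is acquired. */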
142 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
144 unsigned int wait_count = 30;
146 if (!ql_sem_trylock(qdev, sem_mask))
149 } while (--wait_count);
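/* Release a previously acquired hardware semaphore. */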
153 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
155 ql_write32(qdev, SEM, sem_mask);
156 ql_read32(qdev, SEM); /* flush */
159 /* This function waits for a specific bit to become ready
160 * in a given register. It is used mostly by the initialization
161 * process, but is also used from kernel thread context such as
162 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
164 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 int count = UDELAY_COUNT;
170 temp = ql_read32(qdev, reg);
172 /* check for errors */
173 if (temp & err_bit) {
174 netif_alert(qdev, probe, qdev->ndev,
175 "register 0x%.08x access error, value = 0x%.08x!.\n",
178 } else if (temp & bit)
180 udelay(UDELAY_DELAY);
183 netif_alert(qdev, probe, qdev->ndev,
184 "Timed out waiting for reg %x to come ready.\n", reg);
188 /* The CFG register is used to download TX and RX control blocks
189 * to the chip. This function waits for an operation to complete.
191 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
193 int count = UDELAY_COUNT;
197 temp = ql_read32(qdev, CFG);
202 udelay(UDELAY_DELAY);
209 /* Used to issue init control blocks to hw. Maps control block,
210 * sets address, triggers download, waits for completion.
212 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
222 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 map = pci_map_single(qdev->pdev, ptr, size, direction);
226 if (pci_dma_mapping_error(qdev->pdev, map)) {
227 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 status = ql_wait_cfg(qdev, bit);
237 netif_err(qdev, ifup, qdev->ndev,
238 "Timed out waiting for CFG to come ready.\n");
242 ql_write32(qdev, ICB_L, (u32) map);
243 ql_write32(qdev, ICB_H, (u32) (map >> 32));
245 mask = CFG_Q_MASK | (bit << 16);
246 value = bit | (q_id << CFG_Q_SHIFT);
247 ql_write32(qdev, CFG, (mask | value));
250 * Wait for the bit to clear after signaling hw.
252 status = ql_wait_cfg(qdev, bit);
254 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
255 pci_unmap_single(qdev->pdev, map, size, direction);
259 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
260 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
267 case MAC_ADDR_TYPE_MULTI_MAC:
268 case MAC_ADDR_TYPE_CAM_MAC:
271 ql_wait_reg_rdy(qdev,
272 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276 (index << MAC_ADDR_IDX_SHIFT) | /* index */
277 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
279 ql_wait_reg_rdy(qdev,
280 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
285 ql_wait_reg_rdy(qdev,
286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
293 ql_wait_reg_rdy(qdev,
294 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 if (type == MAC_ADDR_TYPE_CAM_MAC) {
300 ql_wait_reg_rdy(qdev,
301 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
305 (index << MAC_ADDR_IDX_SHIFT) | /* index */
306 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
308 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 case MAC_ADDR_TYPE_VLAN:
317 case MAC_ADDR_TYPE_MULTI_FLTR:
319 netif_crit(qdev, ifup, qdev->ndev,
320 "Address type %d not yet supported.\n", type);
327 /* Set up a MAC, multicast or VLAN address for the
328 * inbound frame matching.
330 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
337 case MAC_ADDR_TYPE_MULTI_MAC:
339 u32 upper = (addr[0] << 8) | addr[1];
340 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
341 (addr[4] << 8) | (addr[5]);
344 ql_wait_reg_rdy(qdev,
345 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
349 (index << MAC_ADDR_IDX_SHIFT) |
351 ql_write32(qdev, MAC_ADDR_DATA, lower);
353 ql_wait_reg_rdy(qdev,
354 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
358 (index << MAC_ADDR_IDX_SHIFT) |
361 ql_write32(qdev, MAC_ADDR_DATA, upper);
363 ql_wait_reg_rdy(qdev,
364 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
369 case MAC_ADDR_TYPE_CAM_MAC:
372 u32 upper = (addr[0] << 8) | addr[1];
374 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
378 "Adding %s address %pM at index %d in the CAM.\n",
379 type == MAC_ADDR_TYPE_MULTI_MAC ?
380 "MULTICAST" : "UNICAST",
384 ql_wait_reg_rdy(qdev,
385 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
389 (index << MAC_ADDR_IDX_SHIFT) | /* index */
391 ql_write32(qdev, MAC_ADDR_DATA, lower);
393 ql_wait_reg_rdy(qdev,
394 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
398 (index << MAC_ADDR_IDX_SHIFT) | /* index */
400 ql_write32(qdev, MAC_ADDR_DATA, upper);
402 ql_wait_reg_rdy(qdev,
403 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
407 (index << MAC_ADDR_IDX_SHIFT) | /* index */
409 /* This field should also include the queue id
410 and possibly the function id. Right now we hardcode
411 the route field to NIC core.
413 cam_output = (CAM_OUT_ROUTE_NIC |
415 func << CAM_OUT_FUNC_SHIFT) |
416 (0 << CAM_OUT_CQ_ID_SHIFT));
418 cam_output |= CAM_OUT_RV;
419 /* route to NIC core */
420 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423 case MAC_ADDR_TYPE_VLAN:
425 u32 enable_bit = *((u32 *) &addr[0]);
426 /* For VLAN, the addr actually holds a bit that
427 * either enables or disables the vlan id we are
428 * addressing. It's either MAC_ADDR_E on or off.
429 * That's bit-27 we're talking about.
431 netif_info(qdev, ifup, qdev->ndev,
432 "%s VLAN ID %d %s the CAM.\n",
433 enable_bit ? "Adding" : "Removing",
435 enable_bit ? "to" : "from");
438 ql_wait_reg_rdy(qdev,
439 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
443 (index << MAC_ADDR_IDX_SHIFT) | /* index */
445 enable_bit); /* enable/disable */
448 case MAC_ADDR_TYPE_MULTI_FLTR:
450 netif_crit(qdev, ifup, qdev->ndev,
451 "Address type %d not yet supported.\n", type);
458 /* Set or clear MAC address in hardware. We sometimes
459 * have to clear it to prevent wrong frame routing
460 * especially in a bonding environment.
462 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 char zero_mac_addr[ETH_ALEN];
469 addr = &qdev->current_mac_addr[0];
470 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
471 "Set Mac addr %pM\n", addr);
473 memset(zero_mac_addr, 0, ETH_ALEN);
474 addr = &zero_mac_addr[0];
475 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
476 "Clearing MAC address\n");
478 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
482 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
483 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
485 netif_err(qdev, ifup, qdev->ndev,
486 "Failed to init mac address.\n");
490 void ql_link_on(struct ql_adapter *qdev)
492 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
493 netif_carrier_on(qdev->ndev);
494 ql_set_mac_addr(qdev, 1);
497 void ql_link_off(struct ql_adapter *qdev)
499 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
500 netif_carrier_off(qdev->ndev);
501 ql_set_mac_addr(qdev, 0);
504 /* Get a specific frame routing value from the CAM.
505 * Used for debug and reg dump.
507 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
511 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
515 ql_write32(qdev, RT_IDX,
516 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
517 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520 *value = ql_read32(qdev, RT_DATA);
525 /* The NIC function for this chip has 16 routing indexes. Each one can be used
526 * to route different frame types to various inbound queues. We send broadcast/
527 * multicast/error frames to the default queue for slow handling,
528 * and CAM hit/RSS frames to the fast handling queues.
530 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533 int status = -EINVAL; /* Return error if no mask match. */
536 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
537 "%s %s mask %s the routing reg.\n",
538 enable ? "Adding" : "Removing",
539 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
540 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
541 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
542 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
543 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
544 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
545 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
546 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
547 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
548 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
549 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
550 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
551 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
552 index == RT_IDX_UNUSED013 ? "UNUSED13" :
553 index == RT_IDX_UNUSED014 ? "UNUSED14" :
554 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
555 "(Bad index != RT_IDX)",
556 enable ? "to" : "from");
561 value = RT_IDX_DST_CAM_Q | /* dest */
562 RT_IDX_TYPE_NICQ | /* type */
563 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 case RT_IDX_VALID: /* Promiscuous Mode frames. */
568 value = RT_IDX_DST_DFLT_Q | /* dest */
569 RT_IDX_TYPE_NICQ | /* type */
570 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
575 value = RT_IDX_DST_DFLT_Q | /* dest */
576 RT_IDX_TYPE_NICQ | /* type */
577 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
582 value = RT_IDX_DST_DFLT_Q | /* dest */
583 RT_IDX_TYPE_NICQ | /* type */
584 (RT_IDX_IP_CSUM_ERR_SLOT <<
585 RT_IDX_IDX_SHIFT); /* index */
588 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
590 value = RT_IDX_DST_DFLT_Q | /* dest */
591 RT_IDX_TYPE_NICQ | /* type */
592 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
593 RT_IDX_IDX_SHIFT); /* index */
596 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
598 value = RT_IDX_DST_DFLT_Q | /* dest */
599 RT_IDX_TYPE_NICQ | /* type */
600 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
605 value = RT_IDX_DST_DFLT_Q | /* dest */
606 RT_IDX_TYPE_NICQ | /* type */
607 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
612 value = RT_IDX_DST_DFLT_Q | /* dest */
613 RT_IDX_TYPE_NICQ | /* type */
614 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
619 value = RT_IDX_DST_RSS | /* dest */
620 RT_IDX_TYPE_NICQ | /* type */
621 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624 case 0: /* Clear the E-bit on an entry. */
626 value = RT_IDX_DST_DFLT_Q | /* dest */
627 RT_IDX_TYPE_NICQ | /* type */
628 (index << RT_IDX_IDX_SHIFT);/* index */
632 netif_err(qdev, ifup, qdev->ndev,
633 "Mask type %d not yet supported.\n", mask);
639 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642 value |= (enable ? RT_IDX_E : 0);
643 ql_write32(qdev, RT_IDX, value);
644 ql_write32(qdev, RT_DATA, enable ? mask : 0);
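/* Global interrupt enable/disable. The upper 16 bits of INTR_EN act as
 * a write mask for the lower 16, so writing only the mask clears the
 * INTR_EN_EI enable bit.
 */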
650 static void ql_enable_interrupts(struct ql_adapter *qdev)
652 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 static void ql_disable_interrupts(struct ql_adapter *qdev)
657 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 /* If we're running with multiple MSI-X vectors then we enable on the fly.
661 * Otherwise, we may have multiple outstanding workers and don't want to
662 * enable until the last one finishes. In this case, the irq_cnt gets
663 * incremented every time we queue a worker and decremented every time
664 * a worker finishes. Once it hits zero we enable the interrupt.
666 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 unsigned long hw_flags = 0;
670 struct intr_context *ctx = qdev->intr_context + intr;
672 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
673 /* Always enable if we're MSIX multi interrupts and
674 * it's not the default (zeroth) interrupt.
676 ql_write32(qdev, INTR_EN,
678 var = ql_read32(qdev, STS);
682 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
683 if (atomic_dec_and_test(&ctx->irq_cnt)) {
684 ql_write32(qdev, INTR_EN,
686 var = ql_read32(qdev, STS);
688 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
692 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 struct intr_context *ctx;
697 /* HW disables for us if we're MSIX multi interrupts and
698 * it's not the default (zeroth) interrupt.
700 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703 ctx = qdev->intr_context + intr;
704 spin_lock(&qdev->hw_lock);
705 if (!atomic_read(&ctx->irq_cnt)) {
706 ql_write32(qdev, INTR_EN,
708 var = ql_read32(qdev, STS);
710 atomic_inc(&ctx->irq_cnt);
711 spin_unlock(&qdev->hw_lock);
715 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 for (i = 0; i < qdev->intr_count; i++) {
719 /* The enable call does an atomic_dec_and_test
720 * and enables only if the result is zero.
721 * So we precharge it here.
723 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
725 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
726 ql_enable_completion_interrupt(qdev, i);
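/* Validate a flash image: check the signature string and verify that
 * the 16-bit words of the image sum to zero.
 */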
731 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
735 __le16 *flash = (__le16 *)&qdev->flash;
737 status = strncmp((char *)&qdev->flash, str, 4);
739 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
743 for (i = 0; i < size; i++)
744 csum += le16_to_cpu(*flash++);
747 netif_err(qdev, ifup, qdev->ndev,
748 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
753 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 /* wait for reg to come ready */
757 status = ql_wait_reg_rdy(qdev,
758 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761 /* set up for reg read */
762 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
763 /* wait for reg to come ready */
764 status = ql_wait_reg_rdy(qdev,
765 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768 /* This data is stored on flash as an array of
769 * __le32. Since ql_read32() returns cpu endian
770 * we need to swap it back.
772 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
777 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
781 __le32 *p = (__le32 *)&qdev->flash;
785 /* Get flash offset for function and adjust
789 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
791 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
793 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796 size = sizeof(struct flash_params_8000) / sizeof(u32);
797 for (i = 0; i < size; i++, p++) {
798 status = ql_read_flash_word(qdev, i+offset, p);
800 netif_err(qdev, ifup, qdev->ndev,
801 "Error reading flash.\n");
806 status = ql_validate_flash(qdev,
807 sizeof(struct flash_params_8000) / sizeof(u16),
810 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
815 /* Extract either manufacturer or BOFM modified
818 if (qdev->flash.flash_params_8000.data_type1 == 2)
820 qdev->flash.flash_params_8000.mac_addr1,
821 qdev->ndev->addr_len);
824 qdev->flash.flash_params_8000.mac_addr,
825 qdev->ndev->addr_len);
827 if (!is_valid_ether_addr(mac_addr)) {
828 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
833 memcpy(qdev->ndev->dev_addr,
835 qdev->ndev->addr_len);
838 ql_sem_unlock(qdev, SEM_FLASH_MASK);
842 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
846 __le32 *p = (__le32 *)&qdev->flash;
848 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
850 /* Second function's parameters follow the first
856 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859 for (i = 0; i < size; i++, p++) {
860 status = ql_read_flash_word(qdev, i+offset, p);
862 netif_err(qdev, ifup, qdev->ndev,
863 "Error reading flash.\n");
869 status = ql_validate_flash(qdev,
870 sizeof(struct flash_params_8012) / sizeof(u16),
873 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
878 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
883 memcpy(qdev->ndev->dev_addr,
884 qdev->flash.flash_params_8012.mac_addr,
885 qdev->ndev->addr_len);
888 ql_sem_unlock(qdev, SEM_FLASH_MASK);
892 /* xgmac registers are located behind the xgmac_addr and xgmac_data
893 * register pair. Each read/write requires us to wait for the ready
894 * bit before reading/writing the data.
896 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 /* wait for reg to come ready */
900 status = ql_wait_reg_rdy(qdev,
901 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 /* write the data to the data reg */
905 ql_write32(qdev, XGMAC_DATA, data);
906 /* trigger the write */
907 ql_write32(qdev, XGMAC_ADDR, reg);
911 /* xgmac registers are located behind the xgmac_addr and xgmac_data
912 * register pair. Each read/write requires us to wait for the ready
913 * bit before reading/writing the data.
915 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 /* wait for reg to come ready */
919 status = ql_wait_reg_rdy(qdev,
920 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923 /* set up for reg read */
924 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
925 /* wait for reg to come ready */
926 status = ql_wait_reg_rdy(qdev,
927 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
931 *data = ql_read32(qdev, XGMAC_DATA);
936 /* This is used for reading the 64-bit statistics regs. */
937 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
943 status = ql_read_xgmac_reg(qdev, reg, &lo);
947 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
951 *data = (u64) lo | ((u64) hi << 32);
957 static int ql_8000_port_initialize(struct ql_adapter *qdev)
961 * Get MPI firmware version for driver banner
964 status = ql_mb_about_fw(qdev);
967 status = ql_mb_get_fw_state(qdev);
970 /* Wake up a worker to get/set the TX/RX frame sizes. */
971 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
976 /* Take the MAC Core out of reset.
977 * Enable statistics counting.
978 * Take the transmitter/receiver out of reset.
979 * This functionality may be done in the MPI firmware at a
982 static int ql_8012_port_initialize(struct ql_adapter *qdev)
987 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
988 /* Another function has the semaphore, so
989 * wait for the port init bit to come ready.
991 netif_info(qdev, link, qdev->ndev,
992 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
993 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
995 netif_crit(qdev, link, qdev->ndev,
996 "Port initialize timed out.\n");
1001 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1002 /* Set the core reset. */
1003 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006 data |= GLOBAL_CFG_RESET;
1007 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1011 /* Clear the core reset and turn on jumbo for receiver. */
1012 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1013 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1014 data |= GLOBAL_CFG_TX_STAT_EN;
1015 data |= GLOBAL_CFG_RX_STAT_EN;
1016 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1020 /* Enable the transmitter and clear its reset. */
1021 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1025 data |= TX_CFG_EN; /* Enable the transmitter. */
1026 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1030 /* Enable the receiver and clear its reset. */
1031 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1035 data |= RX_CFG_EN; /* Enable the receiver. */
1036 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1040 /* Turn on jumbo. */
1042 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1046 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1050 /* Signal to the world that the port is enabled. */
1051 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1053 ql_sem_unlock(qdev, qdev->xg_sem_mask);
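/* Size in bytes of one large-buffer master page (PAGE_SIZE << lbq_buf_order). */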
1057 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1059 return PAGE_SIZE << qdev->lbq_buf_order;
1062 /* Get the next large buffer. */
1063 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1065 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1066 rx_ring->lbq_curr_idx++;
1067 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1068 rx_ring->lbq_curr_idx = 0;
1069 rx_ring->lbq_free_cnt++;
1073 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1074 struct rx_ring *rx_ring)
1076 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1078 pci_dma_sync_single_for_cpu(qdev->pdev,
1079 dma_unmap_addr(lbq_desc, mapaddr),
1080 rx_ring->lbq_buf_size,
1081 PCI_DMA_FROMDEVICE);
1083 /* If it's the last chunk of our master page then
1086 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1087 == ql_lbq_block_size(qdev))
1088 pci_unmap_page(qdev->pdev,
1089 lbq_desc->p.pg_chunk.map,
1090 ql_lbq_block_size(qdev),
1091 PCI_DMA_FROMDEVICE);
1095 /* Get the next small buffer. */
1096 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1098 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1099 rx_ring->sbq_curr_idx++;
1100 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1101 rx_ring->sbq_curr_idx = 0;
1102 rx_ring->sbq_free_cnt++;
1106 /* Update an rx ring index. */
1107 static void ql_update_cq(struct rx_ring *rx_ring)
1109 rx_ring->cnsmr_idx++;
1110 rx_ring->curr_entry++;
1111 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1112 rx_ring->cnsmr_idx = 0;
1113 rx_ring->curr_entry = rx_ring->cq_base;
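/* Tell the chip how far we have consumed on this completion queue. */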
1117 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1119 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
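/* Hand out the next lbq_buf_size chunk of the current master page,
 * allocating and DMA-mapping a fresh page of order lbq_buf_order when
 * the previous one has been fully consumed.
 */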
1122 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1123 struct bq_desc *lbq_desc)
1125 if (!rx_ring->pg_chunk.page) {
1127 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1129 qdev->lbq_buf_order);
1130 if (unlikely(!rx_ring->pg_chunk.page)) {
1131 netif_err(qdev, drv, qdev->ndev,
1132 "page allocation failed.\n");
1135 rx_ring->pg_chunk.offset = 0;
1136 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1137 0, ql_lbq_block_size(qdev),
1138 PCI_DMA_FROMDEVICE);
1139 if (pci_dma_mapping_error(qdev->pdev, map)) {
1140 __free_pages(rx_ring->pg_chunk.page,
1141 qdev->lbq_buf_order);
1142 netif_err(qdev, drv, qdev->ndev,
1143 "PCI mapping failed.\n");
1146 rx_ring->pg_chunk.map = map;
1147 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150 /* Copy the current master pg_chunk info
1151 * to the current descriptor.
1153 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1155 /* Adjust the master page chunk for next
1158 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1159 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1160 rx_ring->pg_chunk.page = NULL;
1161 lbq_desc->p.pg_chunk.last_flag = 1;
1163 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1164 get_page(rx_ring->pg_chunk.page);
1165 lbq_desc->p.pg_chunk.last_flag = 0;
1169 /* Process (refill) a large buffer queue. */
1170 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1172 u32 clean_idx = rx_ring->lbq_clean_idx;
1173 u32 start_idx = clean_idx;
1174 struct bq_desc *lbq_desc;
1178 while (rx_ring->lbq_free_cnt > 32) {
1179 for (i = 0; i < 16; i++) {
1180 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1181 "lbq: try cleaning clean_idx = %d.\n",
1183 lbq_desc = &rx_ring->lbq[clean_idx];
1184 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1185 netif_err(qdev, ifup, qdev->ndev,
1186 "Could not get a page chunk.\n");
1190 map = lbq_desc->p.pg_chunk.map +
1191 lbq_desc->p.pg_chunk.offset;
1192 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1193 dma_unmap_len_set(lbq_desc, maplen,
1194 rx_ring->lbq_buf_size);
1195 *lbq_desc->addr = cpu_to_le64(map);
1197 pci_dma_sync_single_for_device(qdev->pdev, map,
1198 rx_ring->lbq_buf_size,
1199 PCI_DMA_FROMDEVICE);
1201 if (clean_idx == rx_ring->lbq_len)
1205 rx_ring->lbq_clean_idx = clean_idx;
1206 rx_ring->lbq_prod_idx += 16;
1207 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1208 rx_ring->lbq_prod_idx = 0;
1209 rx_ring->lbq_free_cnt -= 16;
1212 if (start_idx != clean_idx) {
1213 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1214 "lbq: updating prod idx = %d.\n",
1215 rx_ring->lbq_prod_idx);
1216 ql_write_db_reg(rx_ring->lbq_prod_idx,
1217 rx_ring->lbq_prod_idx_db_reg);
1221 /* Process (refill) a small buffer queue. */
1222 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1224 u32 clean_idx = rx_ring->sbq_clean_idx;
1225 u32 start_idx = clean_idx;
1226 struct bq_desc *sbq_desc;
1230 while (rx_ring->sbq_free_cnt > 16) {
1231 for (i = 0; i < 16; i++) {
1232 sbq_desc = &rx_ring->sbq[clean_idx];
1233 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1234 "sbq: try cleaning clean_idx = %d.\n",
1236 if (sbq_desc->p.skb == NULL) {
1237 netif_printk(qdev, rx_status, KERN_DEBUG,
1239 "sbq: getting new skb for index %d.\n",
1242 netdev_alloc_skb(qdev->ndev,
1244 if (sbq_desc->p.skb == NULL) {
1245 netif_err(qdev, probe, qdev->ndev,
1246 "Couldn't get an skb.\n");
1247 rx_ring->sbq_clean_idx = clean_idx;
1250 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1251 map = pci_map_single(qdev->pdev,
1252 sbq_desc->p.skb->data,
1253 rx_ring->sbq_buf_size,
1254 PCI_DMA_FROMDEVICE);
1255 if (pci_dma_mapping_error(qdev->pdev, map)) {
1256 netif_err(qdev, ifup, qdev->ndev,
1257 "PCI mapping failed.\n");
1258 rx_ring->sbq_clean_idx = clean_idx;
1259 dev_kfree_skb_any(sbq_desc->p.skb);
1260 sbq_desc->p.skb = NULL;
1263 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1264 dma_unmap_len_set(sbq_desc, maplen,
1265 rx_ring->sbq_buf_size);
1266 *sbq_desc->addr = cpu_to_le64(map);
1270 if (clean_idx == rx_ring->sbq_len)
1273 rx_ring->sbq_clean_idx = clean_idx;
1274 rx_ring->sbq_prod_idx += 16;
1275 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1276 rx_ring->sbq_prod_idx = 0;
1277 rx_ring->sbq_free_cnt -= 16;
1280 if (start_idx != clean_idx) {
1281 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1282 "sbq: updating prod idx = %d.\n",
1283 rx_ring->sbq_prod_idx);
1284 ql_write_db_reg(rx_ring->sbq_prod_idx,
1285 rx_ring->sbq_prod_idx_db_reg);
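/* Refill both the small and large buffer queues for this rx ring. */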
1289 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1290 struct rx_ring *rx_ring)
1292 ql_update_sbq(qdev, rx_ring);
1293 ql_update_lbq(qdev, rx_ring);
1296 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1297 * fails at some stage, or from the interrupt when a tx completes.
1299 static void ql_unmap_send(struct ql_adapter *qdev,
1300 struct tx_ring_desc *tx_ring_desc, int mapped)
1303 for (i = 0; i < mapped; i++) {
1304 if (i == 0 || (i == 7 && mapped > 7)) {
1306 * Unmap the skb->data area, or the
1307 * external sglist (AKA the Outbound
1308 * Address List (OAL)).
1309 * If it's the zeroth element, then it's
1310 * the skb->data area. If it's the 7th
1311 * element and there are more than 6 frags,
1315 netif_printk(qdev, tx_done, KERN_DEBUG,
1317 "unmapping OAL area.\n");
1319 pci_unmap_single(qdev->pdev,
1320 dma_unmap_addr(&tx_ring_desc->map[i],
1322 dma_unmap_len(&tx_ring_desc->map[i],
1326 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1327 "unmapping frag %d.\n", i);
1328 pci_unmap_page(qdev->pdev,
1329 dma_unmap_addr(&tx_ring_desc->map[i],
1331 dma_unmap_len(&tx_ring_desc->map[i],
1332 maplen), PCI_DMA_TODEVICE);
1338 /* Map the buffers for this transmit. This will return
1339 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1341 static int ql_map_send(struct ql_adapter *qdev,
1342 struct ob_mac_iocb_req *mac_iocb_ptr,
1343 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1345 int len = skb_headlen(skb);
1347 int frag_idx, err, map_idx = 0;
1348 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1349 int frag_cnt = skb_shinfo(skb)->nr_frags;
1352 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1353 "frag_cnt = %d.\n", frag_cnt);
1356 * Map the skb buffer first.
1358 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1360 err = pci_dma_mapping_error(qdev->pdev, map);
1362 netif_err(qdev, tx_queued, qdev->ndev,
1363 "PCI mapping failed with error: %d\n", err);
1365 return NETDEV_TX_BUSY;
1368 tbd->len = cpu_to_le32(len);
1369 tbd->addr = cpu_to_le64(map);
1370 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1371 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1375 * This loop fills the remainder of the 8 address descriptors
1376 * in the IOCB. If there are more than 7 fragments, then the
1377 * eighth address desc will point to an external list (OAL).
1378 * When this happens, the remainder of the frags will be stored
1381 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1382 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1384 if (frag_idx == 6 && frag_cnt > 7) {
1385 /* Let's tack on an sglist.
1386 * Our control block will now
1388 * iocb->seg[0] = skb->data
1389 * iocb->seg[1] = frag[0]
1390 * iocb->seg[2] = frag[1]
1391 * iocb->seg[3] = frag[2]
1392 * iocb->seg[4] = frag[3]
1393 * iocb->seg[5] = frag[4]
1394 * iocb->seg[6] = frag[5]
1395 * iocb->seg[7] = ptr to OAL (external sglist)
1396 * oal->seg[0] = frag[6]
1397 * oal->seg[1] = frag[7]
1398 * oal->seg[2] = frag[8]
1399 * oal->seg[3] = frag[9]
1400 * oal->seg[4] = frag[10]
1403 /* Tack on the OAL in the eighth segment of IOCB. */
1404 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407 err = pci_dma_mapping_error(qdev->pdev, map);
1409 netif_err(qdev, tx_queued, qdev->ndev,
1410 "PCI mapping outbound address list with error: %d\n",
1415 tbd->addr = cpu_to_le64(map);
1417 * The length is the number of fragments
1418 * that remain to be mapped times the length
1419 * of our sglist (OAL).
1422 cpu_to_le32((sizeof(struct tx_buf_desc) *
1423 (frag_cnt - frag_idx)) | TX_DESC_C);
1424 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1426 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1427 sizeof(struct oal));
1428 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1433 pci_map_page(qdev->pdev, frag->page,
1434 frag->page_offset, frag->size,
1437 err = pci_dma_mapping_error(qdev->pdev, map);
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(frag->size);
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be unmapped.
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct skb_frag_struct *rx_frag;
1480 struct napi_struct *napi = &rx_ring->napi;
1482 napi->dev = qdev->ndev;
1484 skb = napi_get_frags(napi);
1486 netif_err(qdev, drv, qdev->ndev,
1487 "Couldn't get an skb, exiting.\n");
1488 rx_ring->rx_dropped++;
1489 put_page(lbq_desc->p.pg_chunk.page);
1492 prefetch(lbq_desc->p.pg_chunk.va);
1493 rx_frag = skb_shinfo(skb)->frags;
1494 nr_frags = skb_shinfo(skb)->nr_frags;
1495 rx_frag += nr_frags;
1496 rx_frag->page = lbq_desc->p.pg_chunk.page;
1497 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1498 rx_frag->size = length;
1501 skb->data_len += length;
1502 skb->truesize += length;
1503 skb_shinfo(skb)->nr_frags++;
1505 rx_ring->rx_packets++;
1506 rx_ring->rx_bytes += length;
1507 skb->ip_summed = CHECKSUM_UNNECESSARY;
1508 skb_record_rx_queue(skb, rx_ring->cq_id);
1509 if (qdev->vlgrp && (vlan_id != 0xffff))
1510 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1512 napi_gro_frags(napi);
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517 struct rx_ring *rx_ring,
1518 struct ib_mac_iocb_rsp *ib_mac_rsp,
1522 struct net_device *ndev = qdev->ndev;
1523 struct sk_buff *skb = NULL;
1525 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526 struct napi_struct *napi = &rx_ring->napi;
1528 skb = netdev_alloc_skb(ndev, length);
1530 netif_err(qdev, drv, qdev->ndev,
1531 "Couldn't get an skb, need to unwind!.\n");
1532 rx_ring->rx_dropped++;
1533 put_page(lbq_desc->p.pg_chunk.page);
1537 addr = lbq_desc->p.pg_chunk.va;
1541 /* Frame error, so drop the packet. */
1542 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1543 netif_info(qdev, drv, qdev->ndev,
1544 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1545 rx_ring->rx_errors++;
1549 /* The max framesize filter on this chip is set higher than
1550 * MTU since FCoE uses 2k frames.
1552 if (skb->len > ndev->mtu + ETH_HLEN) {
1553 netif_err(qdev, drv, qdev->ndev,
1554 "Segment too small, dropping.\n");
1555 rx_ring->rx_dropped++;
1558 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1563 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565 skb->len += length-ETH_HLEN;
1566 skb->data_len += length-ETH_HLEN;
1567 skb->truesize += length-ETH_HLEN;
1569 rx_ring->rx_packets++;
1570 rx_ring->rx_bytes += skb->len;
1571 skb->protocol = eth_type_trans(skb, ndev);
1572 skb_checksum_none_assert(skb);
1574 if (qdev->rx_csum &&
1575 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1578 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1579 "TCP checksum done!\n");
1580 skb->ip_summed = CHECKSUM_UNNECESSARY;
1581 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1582 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1583 /* Unfragmented ipv4 UDP frame. */
1584 struct iphdr *iph = (struct iphdr *) skb->data;
1585 if (!(iph->frag_off &
1586 cpu_to_be16(IP_MF|IP_OFFSET))) {
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 netif_printk(qdev, rx_status, KERN_DEBUG,
1590 "TCP checksum done!\n");
1595 skb_record_rx_queue(skb, rx_ring->cq_id);
1596 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1597 if (qdev->vlgrp && (vlan_id != 0xffff))
1598 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1600 napi_gro_receive(napi, skb);
1602 if (qdev->vlgrp && (vlan_id != 0xffff))
1603 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1605 netif_receive_skb(skb);
1609 dev_kfree_skb_any(skb);
1610 put_page(lbq_desc->p.pg_chunk.page);
1613 /* Process an inbound completion from an rx ring. */
1614 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1615 struct rx_ring *rx_ring,
1616 struct ib_mac_iocb_rsp *ib_mac_rsp,
1620 struct net_device *ndev = qdev->ndev;
1621 struct sk_buff *skb = NULL;
1622 struct sk_buff *new_skb = NULL;
1623 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1625 skb = sbq_desc->p.skb;
1626 /* Allocate new_skb and copy */
1627 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1628 if (new_skb == NULL) {
1629 netif_err(qdev, probe, qdev->ndev,
1630 "No skb available, drop the packet.\n");
1631 rx_ring->rx_dropped++;
1634 skb_reserve(new_skb, NET_IP_ALIGN);
1635 memcpy(skb_put(new_skb, length), skb->data, length);
1638 /* Frame error, so drop the packet. */
1639 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1640 netif_info(qdev, drv, qdev->ndev,
1641 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1642 dev_kfree_skb_any(skb);
1643 rx_ring->rx_errors++;
1647 /* loopback self test for ethtool */
1648 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1649 ql_check_lb_frame(qdev, skb);
1650 dev_kfree_skb_any(skb);
1654 /* The max framesize filter on this chip is set higher than
1655 * MTU since FCoE uses 2k frames.
1657 if (skb->len > ndev->mtu + ETH_HLEN) {
1658 dev_kfree_skb_any(skb);
1659 rx_ring->rx_dropped++;
1663 prefetch(skb->data);
1665 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1670 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1671 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1672 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1673 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1675 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1676 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1677 "Promiscuous Packet.\n");
1679 rx_ring->rx_packets++;
1680 rx_ring->rx_bytes += skb->len;
1681 skb->protocol = eth_type_trans(skb, ndev);
1682 skb_checksum_none_assert(skb);
1684 /* If rx checksum is on, and there are no
1685 * csum or frame errors.
1687 if (qdev->rx_csum &&
1688 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1690 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1691 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1692 "TCP checksum done!\n");
1693 skb->ip_summed = CHECKSUM_UNNECESSARY;
1694 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1695 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1696 /* Unfragmented ipv4 UDP frame. */
1697 struct iphdr *iph = (struct iphdr *) skb->data;
1698 if (!(iph->frag_off &
1699 ntohs(IP_MF|IP_OFFSET))) {
1700 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701 netif_printk(qdev, rx_status, KERN_DEBUG,
1703 "TCP checksum done!\n");
1708 skb_record_rx_queue(skb, rx_ring->cq_id);
1709 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1710 if (qdev->vlgrp && (vlan_id != 0xffff))
1711 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1714 napi_gro_receive(&rx_ring->napi, skb);
1716 if (qdev->vlgrp && (vlan_id != 0xffff))
1717 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1719 netif_receive_skb(skb);
1723 static void ql_realign_skb(struct sk_buff *skb, int len)
1725 void *temp_addr = skb->data;
1727 /* Undo the skb_reserve(skb,32) we did before
1728 * giving to hardware, and realign data on
1729 * a 2-byte boundary.
1731 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1732 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1733 skb_copy_to_linear_data(skb, temp_addr,
1738 * This function builds an skb for the given inbound
1739 * completion. It will be rewritten for readability in the near
1740 * future, but for now it works well.
1742 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1743 struct rx_ring *rx_ring,
1744 struct ib_mac_iocb_rsp *ib_mac_rsp)
1746 struct bq_desc *lbq_desc;
1747 struct bq_desc *sbq_desc;
1748 struct sk_buff *skb = NULL;
1749 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1750 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1753 * Handle the header buffer if present.
1755 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1756 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1757 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1758 "Header of %d bytes in small buffer.\n", hdr_len);
1760 * Headers fit nicely into a small buffer.
1762 sbq_desc = ql_get_curr_sbuf(rx_ring);
1763 pci_unmap_single(qdev->pdev,
1764 dma_unmap_addr(sbq_desc, mapaddr),
1765 dma_unmap_len(sbq_desc, maplen),
1766 PCI_DMA_FROMDEVICE);
1767 skb = sbq_desc->p.skb;
1768 ql_realign_skb(skb, hdr_len);
1769 skb_put(skb, hdr_len);
1770 sbq_desc->p.skb = NULL;
1774 * Handle the data buffer(s).
1776 if (unlikely(!length)) { /* Is there data too? */
1777 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1778 "No Data buffer in this packet.\n");
1782 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1783 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1784 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1785 "Headers in small, data of %d bytes in small, combine them.\n",
1788 * Data is less than small buffer size so it's
1789 * stuffed in a small buffer.
1790 * For this case we append the data
1791 * from the "data" small buffer to the "header" small
1794 sbq_desc = ql_get_curr_sbuf(rx_ring);
1795 pci_dma_sync_single_for_cpu(qdev->pdev,
1797 (sbq_desc, mapaddr),
1800 PCI_DMA_FROMDEVICE);
1801 memcpy(skb_put(skb, length),
1802 sbq_desc->p.skb->data, length);
1803 pci_dma_sync_single_for_device(qdev->pdev,
1810 PCI_DMA_FROMDEVICE);
1812 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1813 "%d bytes in a single small buffer.\n",
1815 sbq_desc = ql_get_curr_sbuf(rx_ring);
1816 skb = sbq_desc->p.skb;
1817 ql_realign_skb(skb, length);
1818 skb_put(skb, length);
1819 pci_unmap_single(qdev->pdev,
1820 dma_unmap_addr(sbq_desc,
1822 dma_unmap_len(sbq_desc,
1824 PCI_DMA_FROMDEVICE);
1825 sbq_desc->p.skb = NULL;
1827 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1828 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "Header in small, %d bytes in large. Chain large to small!\n",
1833 * The data is in a single large buffer. We
1834 * chain it to the header buffer's skb and let
1837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1839 "Chaining page at offset = %d, for %d bytes to skb.\n",
1840 lbq_desc->p.pg_chunk.offset, length);
1841 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1842 lbq_desc->p.pg_chunk.offset,
1845 skb->data_len += length;
1846 skb->truesize += length;
1849 * The headers and data are in a single large buffer. We
1850 * copy it to a new skb and let it go. This can happen with
1851 * jumbo mtu on a non-TCP/UDP frame.
1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854 skb = netdev_alloc_skb(qdev->ndev, length);
1856 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1857 "No skb available, drop the packet.\n");
1860 pci_unmap_page(qdev->pdev,
1861 dma_unmap_addr(lbq_desc,
1863 dma_unmap_len(lbq_desc, maplen),
1864 PCI_DMA_FROMDEVICE);
1865 skb_reserve(skb, NET_IP_ALIGN);
1866 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1869 skb_fill_page_desc(skb, 0,
1870 lbq_desc->p.pg_chunk.page,
1871 lbq_desc->p.pg_chunk.offset,
1874 skb->data_len += length;
1875 skb->truesize += length;
1877 __pskb_pull_tail(skb,
1878 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1879 VLAN_ETH_HLEN : ETH_HLEN);
1883 * The data is in a chain of large buffers
1884 * pointed to by a small buffer. We loop
1885 * through and chain them to our small header
1887 * frags: There are 18 max frags and our small
1888 * buffer will hold 32 of them. The thing is,
1889 * we'll use 3 max for our 9000 byte jumbo
1890 * frames. If the MTU goes up we could
1891 * eventually be in trouble.
1894 sbq_desc = ql_get_curr_sbuf(rx_ring);
1895 pci_unmap_single(qdev->pdev,
1896 dma_unmap_addr(sbq_desc, mapaddr),
1897 dma_unmap_len(sbq_desc, maplen),
1898 PCI_DMA_FROMDEVICE);
1899 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1901 * This is a non-TCP/UDP IP frame, so
1902 * the headers aren't split into a small
1903 * buffer. We have to use the small buffer
1904 * that contains our sg list as our skb to
1905 * send upstairs. Copy the sg list here to
1906 * a local buffer and use it to find the
1909 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1910 "%d bytes of headers & data in chain of large.\n",
1912 skb = sbq_desc->p.skb;
1913 sbq_desc->p.skb = NULL;
1914 skb_reserve(skb, NET_IP_ALIGN);
1916 while (length > 0) {
1917 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1918 size = (length < rx_ring->lbq_buf_size) ? length :
1919 rx_ring->lbq_buf_size;
1921 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1922 "Adding page %d to skb for %d bytes.\n",
1924 skb_fill_page_desc(skb, i,
1925 lbq_desc->p.pg_chunk.page,
1926 lbq_desc->p.pg_chunk.offset,
1929 skb->data_len += size;
1930 skb->truesize += size;
1934 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1935 VLAN_ETH_HLEN : ETH_HLEN);
1940 /* Process an inbound completion from an rx ring. */
1941 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1942 struct rx_ring *rx_ring,
1943 struct ib_mac_iocb_rsp *ib_mac_rsp,
1946 struct net_device *ndev = qdev->ndev;
1947 struct sk_buff *skb = NULL;
1949 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1951 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1952 if (unlikely(!skb)) {
1953 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1954 "No skb available, drop packet.\n");
1955 rx_ring->rx_dropped++;
1959 /* Frame error, so drop the packet. */
1960 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1961 netif_info(qdev, drv, qdev->ndev,
1962 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1963 dev_kfree_skb_any(skb);
1964 rx_ring->rx_errors++;
1968 /* The max framesize filter on this chip is set higher than
1969 * MTU since FCoE uses 2k frames.
1971 if (skb->len > ndev->mtu + ETH_HLEN) {
1972 dev_kfree_skb_any(skb);
1973 rx_ring->rx_dropped++;
1977 /* loopback self test for ethtool */
1978 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1979 ql_check_lb_frame(qdev, skb);
1980 dev_kfree_skb_any(skb);
1984 prefetch(skb->data);
1986 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1987 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1988 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1989 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1990 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1991 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1992 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1993 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1994 rx_ring->rx_multicast++;
1996 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1997 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1998 "Promiscuous Packet.\n");
2001 skb->protocol = eth_type_trans(skb, ndev);
2002 skb_checksum_none_assert(skb);
2004 /* If rx checksum is on, and there are no
2005 * csum or frame errors.
2007 if (qdev->rx_csum &&
2008 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 "TCP checksum done!\n");
2013 skb->ip_summed = CHECKSUM_UNNECESSARY;
2014 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2015 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2016 /* Unfragmented ipv4 UDP frame. */
2017 struct iphdr *iph = (struct iphdr *) skb->data;
2018 if (!(iph->frag_off &
2019 ntohs(IP_MF|IP_OFFSET))) {
2020 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2022 "TCP checksum done!\n");
2027 rx_ring->rx_packets++;
2028 rx_ring->rx_bytes += skb->len;
2029 skb_record_rx_queue(skb, rx_ring->cq_id);
2030 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2032 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2034 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2037 napi_gro_receive(&rx_ring->napi, skb);
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2042 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2044 netif_receive_skb(skb);
2048 /* Process an inbound completion from an rx ring. */
2049 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2050 struct rx_ring *rx_ring,
2051 struct ib_mac_iocb_rsp *ib_mac_rsp)
2053 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2054 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2055 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2056 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2058 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2060 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2061 /* The data and headers are split into
2064 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2067 /* The data fit in a single small buffer.
2068 * Allocate a new skb, copy the data and
2069 * return the buffer to the free pool.
2071 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2073 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2074 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2075 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2076 /* TCP packet in a page chunk that's been checksummed.
2077 * Tack it on to our GRO skb and let it go.
2079 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2081 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2082 /* Non-TCP packet in a page chunk. Allocate an
2083 * skb, tack it on frags, and send it up.
2085 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2088 /* Non-TCP/UDP large frames that span multiple buffers
2089 * can be processed correctly by the split frame logic.
2091 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2095 return (unsigned long)length;
2098 /* Process an outbound completion from an rx ring. */
2099 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2100 struct ob_mac_iocb_rsp *mac_rsp)
2102 struct tx_ring *tx_ring;
2103 struct tx_ring_desc *tx_ring_desc;
2105 QL_DUMP_OB_MAC_RSP(mac_rsp);
2106 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2107 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2108 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2109 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2110 tx_ring->tx_packets++;
2111 dev_kfree_skb(tx_ring_desc->skb);
2112 tx_ring_desc->skb = NULL;
2114 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2117 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2118 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2119 netif_warn(qdev, tx_done, qdev->ndev,
2120 "Total descriptor length did not match transfer length.\n");
2122 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2123 netif_warn(qdev, tx_done, qdev->ndev,
2124 "Frame too short to be valid, not sent.\n");
2126 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2127 netif_warn(qdev, tx_done, qdev->ndev,
2128 "Frame too long, but sent anyway.\n");
2130 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2131 netif_warn(qdev, tx_done, qdev->ndev,
2132 "PCI backplane error. Frame not sent.\n");
2135 atomic_inc(&tx_ring->tx_count);
2138 /* Fire up a handler to reset the MPI processor. */
2139 void ql_queue_fw_error(struct ql_adapter *qdev)
2142 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2145 void ql_queue_asic_error(struct ql_adapter *qdev)
2148 ql_disable_interrupts(qdev);
2149 /* Clear adapter up bit to signal the recovery
2150 * process that it shouldn't kill the reset worker
2153 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2154 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2157 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2158 struct ib_ae_iocb_rsp *ib_ae_rsp)
2160 switch (ib_ae_rsp->event) {
2161 case MGMT_ERR_EVENT:
2162 netif_err(qdev, rx_err, qdev->ndev,
2163 "Management Processor Fatal Error.\n");
2164 ql_queue_fw_error(qdev);
2167 case CAM_LOOKUP_ERR_EVENT:
2168 netif_err(qdev, link, qdev->ndev,
2169 "Multiple CAM hits lookup occurred.\n");
2170 netif_err(qdev, drv, qdev->ndev,
2171 "This event shouldn't occur.\n");
2172 ql_queue_asic_error(qdev);
2175 case SOFT_ECC_ERROR_EVENT:
2176 netif_err(qdev, rx_err, qdev->ndev,
2177 "Soft ECC error detected.\n");
2178 ql_queue_asic_error(qdev);
2181 case PCI_ERR_ANON_BUF_RD:
2182 netif_err(qdev, rx_err, qdev->ndev,
2183 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2185 ql_queue_asic_error(qdev);
2189 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2191 ql_queue_asic_error(qdev);
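/* Service an outbound (TX) completion queue: reclaim DMA mappings and
 * skbs for completed sends, then wake the stopped subqueue once the
 * tx ring is at least a quarter empty.
 */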
2196 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2198 struct ql_adapter *qdev = rx_ring->qdev;
2199 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2200 struct ob_mac_iocb_rsp *net_rsp = NULL;
2203 struct tx_ring *tx_ring;
2204 /* While there are entries in the completion queue. */
2205 while (prod != rx_ring->cnsmr_idx) {
2207 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2208 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2209 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2211 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2213 switch (net_rsp->opcode) {
2215 case OPCODE_OB_MAC_TSO_IOCB:
2216 case OPCODE_OB_MAC_IOCB:
2217 ql_process_mac_tx_intr(qdev, net_rsp);
2220 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2221 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2225 ql_update_cq(rx_ring);
2226 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230 ql_write_cq_idx(rx_ring);
2231 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2232 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2233 if (atomic_read(&tx_ring->queue_stopped) &&
2234 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2236 * The queue got stopped because the tx_ring was full.
2237 * Wake it up, because it's now at least 25% empty.
2239 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
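/* Illustration (assuming a 256-entry work queue): the subqueue is
 * restarted once more than 64 descriptors are free again.
 */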
2245 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2247 struct ql_adapter *qdev = rx_ring->qdev;
2248 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2249 struct ql_net_rsp_iocb *net_rsp;
2252 /* While there are entries in the completion queue. */
2253 while (prod != rx_ring->cnsmr_idx) {
2255 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2257 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2259 net_rsp = rx_ring->curr_entry;
2261 switch (net_rsp->opcode) {
2262 case OPCODE_IB_MAC_IOCB:
2263 ql_process_mac_rx_intr(qdev, rx_ring,
2264 (struct ib_mac_iocb_rsp *)
2268 case OPCODE_IB_AE_IOCB:
2269 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2273 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2274 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2279 ql_update_cq(rx_ring);
2280 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2281 if (count == budget)
2284 ql_update_buffer_queues(qdev, rx_ring);
2285 ql_write_cq_idx(rx_ring);
2289 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2291 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2292 struct ql_adapter *qdev = rx_ring->qdev;
2293 struct rx_ring *trx_ring;
2294 int i, work_done = 0;
2295 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2297 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2298 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2300 /* Service the TX rings first. They start
2301 * right after the RSS rings. */
2302 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2303 trx_ring = &qdev->rx_ring[i];
2304 /* If this TX completion ring belongs to this vector and
2305 * it's not empty then service it.
2307 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2308 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2309 trx_ring->cnsmr_idx)) {
2310 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2311 "%s: Servicing TX completion ring %d.\n",
2312 __func__, trx_ring->cq_id);
2313 ql_clean_outbound_rx_ring(trx_ring);
2318 * Now service the RSS ring if it's active.
2320 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2321 rx_ring->cnsmr_idx) {
2322 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2323 "%s: Servicing RX completion ring %d.\n",
2324 __func__, rx_ring->cq_id);
2325 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2328 if (work_done < budget) {
2329 napi_complete(napi);
2330 ql_enable_completion_interrupt(qdev, rx_ring->irq);
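/* If the full budget was consumed, NAPI keeps polling this ring; the
 * completion interrupt stays disabled until a later pass finishes
 * under budget.
 */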
2335 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2337 struct ql_adapter *qdev = netdev_priv(ndev);
2341 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2342 "Turning on VLAN in NIC_RCV_CFG.\n");
2343 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2344 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2346 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2347 "Turning off VLAN in NIC_RCV_CFG.\n");
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2352 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2354 struct ql_adapter *qdev = netdev_priv(ndev);
2355 u32 enable_bit = MAC_ADDR_E;
2358 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2361 if (ql_set_mac_addr_reg
2362 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2363 netif_err(qdev, ifup, qdev->ndev,
2364 "Failed to init vlan address.\n");
2366 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2369 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2371 struct ql_adapter *qdev = netdev_priv(ndev);
2375 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2379 if (ql_set_mac_addr_reg
2380 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2381 netif_err(qdev, ifup, qdev->ndev,
2382 "Failed to clear vlan address.\n");
2384 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2388 static void qlge_restore_vlan(struct ql_adapter *qdev)
2390 qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
2394 for (vid = 0; vid < VLAN_N_VID; vid++) {
2395 if (!vlan_group_get_device(qdev->vlgrp, vid))
2397 qlge_vlan_rx_add_vid(qdev->ndev, vid);
2402 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2403 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2405 struct rx_ring *rx_ring = dev_id;
2406 napi_schedule(&rx_ring->napi);
2410 /* This handles a fatal error, MPI activity, and the default
2411 * rx_ring in an MSI-X multiple vector environment.
2412 * In MSI/Legacy environments it also processes the rest of
2415 static irqreturn_t qlge_isr(int irq, void *dev_id)
2417 struct rx_ring *rx_ring = dev_id;
2418 struct ql_adapter *qdev = rx_ring->qdev;
2419 struct intr_context *intr_context = &qdev->intr_context[0];
2423 spin_lock(&qdev->hw_lock);
2424 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2425 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2426 "Shared Interrupt, Not ours!\n");
2427 spin_unlock(&qdev->hw_lock);
2430 spin_unlock(&qdev->hw_lock);
2432 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2435 * Check for fatal error.
2438 ql_queue_asic_error(qdev);
2439 netif_err(qdev, intr, qdev->ndev,
2440 "Got fatal error, STS = %x.\n", var);
2441 var = ql_read32(qdev, ERR_STS);
2442 netif_err(qdev, intr, qdev->ndev,
2443 "Resetting chip. Error Status Register = 0x%x\n", var);
2448 * Check MPI processor activity.
2450 if ((var & STS_PI) &&
2451 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2453 * We've got an async event or mailbox completion.
2454 * Handle it and clear the source of the interrupt.
2456 netif_err(qdev, intr, qdev->ndev,
2457 "Got MPI processor interrupt.\n");
2458 ql_disable_completion_interrupt(qdev, intr_context->intr);
2459 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2460 queue_delayed_work_on(smp_processor_id(),
2461 qdev->workqueue, &qdev->mpi_work, 0);
2466 * Get the bit-mask that shows the active queues for this
2467 * pass. Compare it to the queues that this irq services
2468 * and call napi if there's a match.
2470 var = ql_read32(qdev, ISR1);
2471 if (var & intr_context->irq_mask) {
2472 netif_info(qdev, intr, qdev->ndev,
2473 "Waking handler for rx_ring[0].\n");
2474 ql_disable_completion_interrupt(qdev, intr_context->intr);
2475 napi_schedule(&rx_ring->napi);
2478 ql_enable_completion_interrupt(qdev, intr_context->intr);
2479 return work_done ? IRQ_HANDLED : IRQ_NONE;
2482 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2485 if (skb_is_gso(skb)) {
2487 if (skb_header_cloned(skb)) {
2488 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2493 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2494 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2495 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2496 mac_iocb_ptr->total_hdrs_len =
2497 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2498 mac_iocb_ptr->net_trans_offset =
2499 cpu_to_le16(skb_network_offset(skb) |
2500 skb_transport_offset(skb)
2501 << OB_MAC_TRANSPORT_HDR_SHIFT);
2502 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2503 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2504 if (likely(skb->protocol == htons(ETH_P_IP))) {
2505 struct iphdr *iph = ip_hdr(skb);
2507 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2508 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2512 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2513 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2514 tcp_hdr(skb)->check =
2515 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2516 &ipv6_hdr(skb)->daddr,
2524 static void ql_hw_csum_setup(struct sk_buff *skb,
2525 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2528 struct iphdr *iph = ip_hdr(skb);
2530 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2531 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2532 mac_iocb_ptr->net_trans_offset =
2533 cpu_to_le16(skb_network_offset(skb) |
2534 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2536 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2537 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2538 if (likely(iph->protocol == IPPROTO_TCP)) {
2539 check = &(tcp_hdr(skb)->check);
2540 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2541 mac_iocb_ptr->total_hdrs_len =
2542 cpu_to_le16(skb_transport_offset(skb) +
2543 (tcp_hdr(skb)->doff << 2));
2545 check = &(udp_hdr(skb)->check);
2546 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2547 mac_iocb_ptr->total_hdrs_len =
2548 cpu_to_le16(skb_transport_offset(skb) +
2549 sizeof(struct udphdr));
2551 *check = ~csum_tcpudp_magic(iph->saddr,
2552 iph->daddr, len, iph->protocol, 0);
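/* The transport checksum field is seeded with the pseudo-header sum;
 * the TC/UC flags above tell the hardware to finish the TCP/UDP
 * checksum over the payload.
 */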
2555 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2557 struct tx_ring_desc *tx_ring_desc;
2558 struct ob_mac_iocb_req *mac_iocb_ptr;
2559 struct ql_adapter *qdev = netdev_priv(ndev);
2561 struct tx_ring *tx_ring;
2562 u32 tx_ring_idx = (u32) skb->queue_mapping;
2564 tx_ring = &qdev->tx_ring[tx_ring_idx];
2566 if (skb_padto(skb, ETH_ZLEN))
2567 return NETDEV_TX_OK;
2569 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2570 netif_info(qdev, tx_queued, qdev->ndev,
2571 "%s: shutting down tx queue %d du to lack of resources.\n",
2572 __func__, tx_ring_idx);
2573 netif_stop_subqueue(ndev, tx_ring->wq_id);
2574 atomic_inc(&tx_ring->queue_stopped);
2575 tx_ring->tx_errors++;
2576 return NETDEV_TX_BUSY;
2578 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2579 mac_iocb_ptr = tx_ring_desc->queue_entry;
2580 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2582 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2583 mac_iocb_ptr->tid = tx_ring_desc->index;
2584 /* We use the upper 32-bits to store the tx queue for this IO.
2585 * When we get the completion we can use it to establish the context.
2587 mac_iocb_ptr->txq_idx = tx_ring_idx;
2588 tx_ring_desc->skb = skb;
2590 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2592 if (vlan_tx_tag_present(skb)) {
2593 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2594 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2595 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2596 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2598 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2600 dev_kfree_skb_any(skb);
2601 return NETDEV_TX_OK;
2602 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2603 ql_hw_csum_setup(skb,
2604 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2606 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2608 netif_err(qdev, tx_queued, qdev->ndev,
2609 "Could not map the segments.\n");
2610 tx_ring->tx_errors++;
2611 return NETDEV_TX_BUSY;
2613 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2614 tx_ring->prod_idx++;
2615 if (tx_ring->prod_idx == tx_ring->wq_len)
2616 tx_ring->prod_idx = 0;
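/* The IOCB must be globally visible before the doorbell write below
 * publishes the new producer index; the driver relies on a write
 * barrier (wmb()) at this point.
 */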
2619 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2620 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2621 "tx queued, slot %d, len %d\n",
2622 tx_ring->prod_idx, skb->len);
2624 atomic_dec(&tx_ring->tx_count);
2625 return NETDEV_TX_OK;
2629 static void ql_free_shadow_space(struct ql_adapter *qdev)
2631 if (qdev->rx_ring_shadow_reg_area) {
2632 pci_free_consistent(qdev->pdev,
2634 qdev->rx_ring_shadow_reg_area,
2635 qdev->rx_ring_shadow_reg_dma);
2636 qdev->rx_ring_shadow_reg_area = NULL;
2638 if (qdev->tx_ring_shadow_reg_area) {
2639 pci_free_consistent(qdev->pdev,
2641 qdev->tx_ring_shadow_reg_area,
2642 qdev->tx_ring_shadow_reg_dma);
2643 qdev->tx_ring_shadow_reg_area = NULL;
2647 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2649 qdev->rx_ring_shadow_reg_area =
2650 pci_alloc_consistent(qdev->pdev,
2651 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2652 if (qdev->rx_ring_shadow_reg_area == NULL) {
2653 netif_err(qdev, ifup, qdev->ndev,
2654 "Allocation of RX shadow space failed.\n");
2657 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2658 qdev->tx_ring_shadow_reg_area =
2659 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2660 &qdev->tx_ring_shadow_reg_dma);
2661 if (qdev->tx_ring_shadow_reg_area == NULL) {
2662 netif_err(qdev, ifup, qdev->ndev,
2663 "Allocation of TX shadow space failed.\n");
2664 goto err_wqp_sh_area;
2666 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2670 pci_free_consistent(qdev->pdev,
2672 qdev->rx_ring_shadow_reg_area,
2673 qdev->rx_ring_shadow_reg_dma);
2677 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2679 struct tx_ring_desc *tx_ring_desc;
2681 struct ob_mac_iocb_req *mac_iocb_ptr;
2683 mac_iocb_ptr = tx_ring->wq_base;
2684 tx_ring_desc = tx_ring->q;
2685 for (i = 0; i < tx_ring->wq_len; i++) {
2686 tx_ring_desc->index = i;
2687 tx_ring_desc->skb = NULL;
2688 tx_ring_desc->queue_entry = mac_iocb_ptr;
2692 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2693 atomic_set(&tx_ring->queue_stopped, 0);
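/* tx_count starts at the full ring size; qlge_send() consumes a slot
 * per transmit and ql_process_mac_tx_intr() returns it on completion.
 */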
2696 static void ql_free_tx_resources(struct ql_adapter *qdev,
2697 struct tx_ring *tx_ring)
2699 if (tx_ring->wq_base) {
2700 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2701 tx_ring->wq_base, tx_ring->wq_base_dma);
2702 tx_ring->wq_base = NULL;
2708 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2709 struct tx_ring *tx_ring)
2712 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2713 &tx_ring->wq_base_dma);
2715 if ((tx_ring->wq_base == NULL) ||
2716 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2717 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2721 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2722 if (tx_ring->q == NULL)
2727 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2728 tx_ring->wq_base, tx_ring->wq_base_dma);
2732 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2734 struct bq_desc *lbq_desc;
2736 uint32_t curr_idx, clean_idx;
2738 curr_idx = rx_ring->lbq_curr_idx;
2739 clean_idx = rx_ring->lbq_clean_idx;
2740 while (curr_idx != clean_idx) {
2741 lbq_desc = &rx_ring->lbq[curr_idx];
2743 if (lbq_desc->p.pg_chunk.last_flag) {
2744 pci_unmap_page(qdev->pdev,
2745 lbq_desc->p.pg_chunk.map,
2746 ql_lbq_block_size(qdev),
2747 PCI_DMA_FROMDEVICE);
2748 lbq_desc->p.pg_chunk.last_flag = 0;
2751 put_page(lbq_desc->p.pg_chunk.page);
2752 lbq_desc->p.pg_chunk.page = NULL;
2754 if (++curr_idx == rx_ring->lbq_len)
2760 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2763 struct bq_desc *sbq_desc;
2765 for (i = 0; i < rx_ring->sbq_len; i++) {
2766 sbq_desc = &rx_ring->sbq[i];
2767 if (sbq_desc == NULL) {
2768 netif_err(qdev, ifup, qdev->ndev,
2769 "sbq_desc %d is NULL.\n", i);
2772 if (sbq_desc->p.skb) {
2773 pci_unmap_single(qdev->pdev,
2774 dma_unmap_addr(sbq_desc, mapaddr),
2775 dma_unmap_len(sbq_desc, maplen),
2776 PCI_DMA_FROMDEVICE);
2777 dev_kfree_skb(sbq_desc->p.skb);
2778 sbq_desc->p.skb = NULL;
2783 /* Free all large and small rx buffers associated
2784 * with the completion queues for this device.
2786 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2789 struct rx_ring *rx_ring;
2791 for (i = 0; i < qdev->rx_ring_count; i++) {
2792 rx_ring = &qdev->rx_ring[i];
2794 ql_free_lbq_buffers(qdev, rx_ring);
2796 ql_free_sbq_buffers(qdev, rx_ring);
2800 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2802 struct rx_ring *rx_ring;
2805 for (i = 0; i < qdev->rx_ring_count; i++) {
2806 rx_ring = &qdev->rx_ring[i];
2807 if (rx_ring->type != TX_Q)
2808 ql_update_buffer_queues(qdev, rx_ring);
2812 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2813 struct rx_ring *rx_ring)
2816 struct bq_desc *lbq_desc;
2817 __le64 *bq = rx_ring->lbq_base;
2819 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2820 for (i = 0; i < rx_ring->lbq_len; i++) {
2821 lbq_desc = &rx_ring->lbq[i];
2822 memset(lbq_desc, 0, sizeof(*lbq_desc));
2823 lbq_desc->index = i;
2824 lbq_desc->addr = bq;
2829 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2830 struct rx_ring *rx_ring)
2833 struct bq_desc *sbq_desc;
2834 __le64 *bq = rx_ring->sbq_base;
2836 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2837 for (i = 0; i < rx_ring->sbq_len; i++) {
2838 sbq_desc = &rx_ring->sbq[i];
2839 memset(sbq_desc, 0, sizeof(*sbq_desc));
2840 sbq_desc->index = i;
2841 sbq_desc->addr = bq;
2846 static void ql_free_rx_resources(struct ql_adapter *qdev,
2847 struct rx_ring *rx_ring)
2849 /* Free the small buffer queue. */
2850 if (rx_ring->sbq_base) {
2851 pci_free_consistent(qdev->pdev,
2853 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2854 rx_ring->sbq_base = NULL;
2857 /* Free the small buffer queue control blocks. */
2858 kfree(rx_ring->sbq);
2859 rx_ring->sbq = NULL;
2861 /* Free the large buffer queue. */
2862 if (rx_ring->lbq_base) {
2863 pci_free_consistent(qdev->pdev,
2865 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2866 rx_ring->lbq_base = NULL;
2869 /* Free the large buffer queue control blocks. */
2870 kfree(rx_ring->lbq);
2871 rx_ring->lbq = NULL;
2873 /* Free the rx queue. */
2874 if (rx_ring->cq_base) {
2875 pci_free_consistent(qdev->pdev,
2877 rx_ring->cq_base, rx_ring->cq_base_dma);
2878 rx_ring->cq_base = NULL;
2882 /* Allocate queues and buffers for this completions queue based
2883 * on the values in the parameter structure. */
2884 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2885 struct rx_ring *rx_ring)
2889 * Allocate the completion queue for this rx_ring.
2892 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2893 &rx_ring->cq_base_dma);
2895 if (rx_ring->cq_base == NULL) {
2896 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2900 if (rx_ring->sbq_len) {
2902 * Allocate small buffer queue.
2905 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2906 &rx_ring->sbq_base_dma);
2908 if (rx_ring->sbq_base == NULL) {
2909 netif_err(qdev, ifup, qdev->ndev,
2910 "Small buffer queue allocation failed.\n");
2915 * Allocate small buffer queue control blocks.
2918 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2920 if (rx_ring->sbq == NULL) {
2921 netif_err(qdev, ifup, qdev->ndev,
2922 "Small buffer queue control block allocation failed.\n");
2926 ql_init_sbq_ring(qdev, rx_ring);
2929 if (rx_ring->lbq_len) {
2931 * Allocate large buffer queue.
2934 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2935 &rx_ring->lbq_base_dma);
2937 if (rx_ring->lbq_base == NULL) {
2938 netif_err(qdev, ifup, qdev->ndev,
2939 "Large buffer queue allocation failed.\n");
2943 * Allocate large buffer queue control blocks.
2946 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2948 if (rx_ring->lbq == NULL) {
2949 netif_err(qdev, ifup, qdev->ndev,
2950 "Large buffer queue control block allocation failed.\n");
2954 ql_init_lbq_ring(qdev, rx_ring);
2960 ql_free_rx_resources(qdev, rx_ring);
2964 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2966 struct tx_ring *tx_ring;
2967 struct tx_ring_desc *tx_ring_desc;
2971 * Loop through all queues and free
2974 for (j = 0; j < qdev->tx_ring_count; j++) {
2975 tx_ring = &qdev->tx_ring[j];
2976 for (i = 0; i < tx_ring->wq_len; i++) {
2977 tx_ring_desc = &tx_ring->q[i];
2978 if (tx_ring_desc && tx_ring_desc->skb) {
2979 netif_err(qdev, ifdown, qdev->ndev,
2980 "Freeing lost SKB %p, from queue %d, index %d.\n",
2981 tx_ring_desc->skb, j,
2982 tx_ring_desc->index);
2983 ql_unmap_send(qdev, tx_ring_desc,
2984 tx_ring_desc->map_cnt);
2985 dev_kfree_skb(tx_ring_desc->skb);
2986 tx_ring_desc->skb = NULL;
2992 static void ql_free_mem_resources(struct ql_adapter *qdev)
2996 for (i = 0; i < qdev->tx_ring_count; i++)
2997 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2998 for (i = 0; i < qdev->rx_ring_count; i++)
2999 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3000 ql_free_shadow_space(qdev);
3003 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3007 /* Allocate space for our shadow registers and such. */
3008 if (ql_alloc_shadow_space(qdev))
3011 for (i = 0; i < qdev->rx_ring_count; i++) {
3012 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3013 netif_err(qdev, ifup, qdev->ndev,
3014 "RX resource allocation failed.\n");
3018 /* Allocate tx queue resources */
3019 for (i = 0; i < qdev->tx_ring_count; i++) {
3020 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3021 netif_err(qdev, ifup, qdev->ndev,
3022 "TX resource allocation failed.\n");
3029 ql_free_mem_resources(qdev);
3033 /* Set up the rx ring control block and pass it to the chip.
3034 * The control block is defined as
3035 * "Completion Queue Initialization Control Block", or cqicb.
3037 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3039 struct cqicb *cqicb = &rx_ring->cqicb;
3040 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3041 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3042 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3043 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3044 void __iomem *doorbell_area =
3045 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3049 __le64 *base_indirect_ptr;
3052 /* Set up the shadow registers for this ring. */
3053 rx_ring->prod_idx_sh_reg = shadow_reg;
3054 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3055 *rx_ring->prod_idx_sh_reg = 0;
3056 shadow_reg += sizeof(u64);
3057 shadow_reg_dma += sizeof(u64);
3058 rx_ring->lbq_base_indirect = shadow_reg;
3059 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3060 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3061 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3062 rx_ring->sbq_base_indirect = shadow_reg;
3063 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3065 /* PCI doorbell mem area + 0x00 for consumer index register */
3066 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3067 rx_ring->cnsmr_idx = 0;
3068 rx_ring->curr_entry = rx_ring->cq_base;
3070 /* PCI doorbell mem area + 0x04 for valid register */
3071 rx_ring->valid_db_reg = doorbell_area + 0x04;
3073 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3074 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3076 /* PCI doorbell mem area + 0x1c */
3077 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3079 memset((void *)cqicb, 0, sizeof(struct cqicb));
3080 cqicb->msix_vect = rx_ring->irq;
3082 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3083 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
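/* A queue length of 65536 does not fit in the 16-bit CQICB fields, so
 * it is encoded as 0 (here and for the lbq/sbq lengths below).
 */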
3085 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3087 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3090 * Set up the control block load flags.
3092 cqicb->flags = FLAGS_LC | /* Load queue base address */
3093 FLAGS_LV | /* Load MSI-X vector */
3094 FLAGS_LI; /* Load irq delay values */
3095 if (rx_ring->lbq_len) {
3096 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3097 tmp = (u64)rx_ring->lbq_base_dma;
3098 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3101 *base_indirect_ptr = cpu_to_le64(tmp);
3102 tmp += DB_PAGE_SIZE;
3103 base_indirect_ptr++;
3105 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3107 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3108 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3109 (u16) rx_ring->lbq_buf_size;
3110 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3111 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3112 (u16) rx_ring->lbq_len;
3113 cqicb->lbq_len = cpu_to_le16(bq_len);
3114 rx_ring->lbq_prod_idx = 0;
3115 rx_ring->lbq_curr_idx = 0;
3116 rx_ring->lbq_clean_idx = 0;
3117 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3119 if (rx_ring->sbq_len) {
3120 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3121 tmp = (u64)rx_ring->sbq_base_dma;
3122 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3125 *base_indirect_ptr = cpu_to_le64(tmp);
3126 tmp += DB_PAGE_SIZE;
3127 base_indirect_ptr++;
3129 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3131 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3132 cqicb->sbq_buf_size =
3133 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3134 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3135 (u16) rx_ring->sbq_len;
3136 cqicb->sbq_len = cpu_to_le16(bq_len);
3137 rx_ring->sbq_prod_idx = 0;
3138 rx_ring->sbq_curr_idx = 0;
3139 rx_ring->sbq_clean_idx = 0;
3140 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3142 switch (rx_ring->type) {
3144 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3145 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3148 /* Inbound completion handling rx_rings run in
3149 * separate NAPI contexts.
3151 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3153 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3154 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3157 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3158 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3160 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3161 "Initializing rx work queue.\n");
3162 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3163 CFG_LCQ, rx_ring->cq_id);
3165 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3171 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3173 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3174 void __iomem *doorbell_area =
3175 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3176 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3177 (tx_ring->wq_id * sizeof(u64));
3178 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3179 (tx_ring->wq_id * sizeof(u64));
3183 * Assign doorbell registers for this tx_ring.
3185 /* TX PCI doorbell mem area for tx producer index */
3186 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3187 tx_ring->prod_idx = 0;
3188 /* TX PCI doorbell mem area + 0x04 */
3189 tx_ring->valid_db_reg = doorbell_area + 0x04;
3192 * Assign shadow registers for this tx_ring.
3194 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3195 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3197 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3198 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3199 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3200 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3202 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3204 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3206 ql_init_tx_ring(qdev, tx_ring);
3208 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3209 (u16) tx_ring->wq_id);
3211 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3214 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3215 "Successfully loaded WQICB.\n");
3219 static void ql_disable_msix(struct ql_adapter *qdev)
3221 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3222 pci_disable_msix(qdev->pdev);
3223 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3224 kfree(qdev->msi_x_entry);
3225 qdev->msi_x_entry = NULL;
3226 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3227 pci_disable_msi(qdev->pdev);
3228 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3232 /* We start by trying to get the number of vectors
3233 * stored in qdev->intr_count. If we don't get that
3234 * many then we reduce the count and try again.
3236 static void ql_enable_msix(struct ql_adapter *qdev)
3240 /* Get the MSIX vectors. */
3241 if (qlge_irq_type == MSIX_IRQ) {
3242 /* Try to alloc space for the msix struct,
3243 * if it fails then go to MSI/legacy.
3245 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3246 sizeof(struct msix_entry),
3248 if (!qdev->msi_x_entry) {
3249 qlge_irq_type = MSI_IRQ;
3253 for (i = 0; i < qdev->intr_count; i++)
3254 qdev->msi_x_entry[i].entry = i;
3256 /* Loop to get our vectors. We start with
3257 * what we want and settle for what we get.
3260 err = pci_enable_msix(qdev->pdev,
3261 qdev->msi_x_entry, qdev->intr_count);
3263 qdev->intr_count = err;
3267 kfree(qdev->msi_x_entry);
3268 qdev->msi_x_entry = NULL;
3269 netif_warn(qdev, ifup, qdev->ndev,
3270 "MSI-X Enable failed, trying MSI.\n");
3271 qdev->intr_count = 1;
3272 qlge_irq_type = MSI_IRQ;
3273 } else if (err == 0) {
3274 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3275 netif_info(qdev, ifup, qdev->ndev,
3276 "MSI-X Enabled, got %d vectors.\n",
3282 qdev->intr_count = 1;
3283 if (qlge_irq_type == MSI_IRQ) {
3284 if (!pci_enable_msi(qdev->pdev)) {
3285 set_bit(QL_MSI_ENABLED, &qdev->flags);
3286 netif_info(qdev, ifup, qdev->ndev,
3287 "Running with MSI interrupts.\n");
3291 qlge_irq_type = LEG_IRQ;
3292 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3293 "Running with legacy interrupts.\n");
3296 /* Each vector services 1 RSS ring and 1 or more
3297 * TX completion rings. This function loops through
3298 * the TX completion rings and assigns the vector that
3299 * will service it. An example would be if there are
3300 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3301 * This would mean that vector 0 would service RSS ring 0
3302 * and TX completion rings 0, 1, 2 and 3. Vector 1 would
3303 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3305 static void ql_set_tx_vect(struct ql_adapter *qdev)
3308 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3310 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3311 /* Assign irq vectors to TX rx_rings.*/
3312 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3313 i < qdev->rx_ring_count; i++) {
3314 if (j == tx_rings_per_vector) {
3318 qdev->rx_ring[i].irq = vect;
3322 /* For single vector all rings have an irq
3325 for (i = 0; i < qdev->rx_ring_count; i++)
3326 qdev->rx_ring[i].irq = 0;
3330 /* Set the interrupt mask for this vector. Each vector
3331 * will service 1 RSS ring and 1 or more TX completion
3332 * rings. This function sets up a bit mask per vector
3333 * that indicates which rings it services.
3335 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3337 int j, vect = ctx->intr;
3338 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3340 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3341 /* Add the RSS ring serviced by this vector
3344 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3345 /* Add the TX ring(s) serviced by this vector
3347 for (j = 0; j < tx_rings_per_vector; j++) {
3349 (1 << qdev->rx_ring[qdev->rss_ring_count +
3350 (vect * tx_rings_per_vector) + j].cq_id);
3353 /* For single vector we just shift each queue's
3356 for (j = 0; j < qdev->rx_ring_count; j++)
3357 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
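/* Worked example for the MSI-X case (matching the mapping described
 * above ql_set_tx_vect()): with 2 vectors, 2 RSS rings and 8 TX
 * completion rings, tx_rings_per_vector = 4, so vector 0's mask covers
 * cq_ids 0 and 2-5 while vector 1's covers cq_ids 1 and 6-9.
 */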
3362 * Here we build the intr_context structures based on
3363 * our rx_ring count and intr vector count.
3364 * The intr_context structure is used to hook each vector
3365 * to possibly different handlers.
3367 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3370 struct intr_context *intr_context = &qdev->intr_context[0];
3372 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3373 /* Each rx_ring has its
3374 * own intr_context since we have separate
3375 * vectors for each queue.
3377 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3378 qdev->rx_ring[i].irq = i;
3379 intr_context->intr = i;
3380 intr_context->qdev = qdev;
3381 /* Set up this vector's bit-mask that indicates
3382 * which queues it services.
3384 ql_set_irq_mask(qdev, intr_context);
3386 * We set up each vector's enable/disable/read bits so
3387 * there's no bit/mask calculations in the critical path.
3389 intr_context->intr_en_mask =
3390 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3391 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3393 intr_context->intr_dis_mask =
3394 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3395 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3397 intr_context->intr_read_mask =
3398 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3399 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3402 /* The first vector/queue handles
3403 * broadcast/multicast, fatal errors,
3404 * and firmware events. This in addition
3405 * to normal inbound NAPI processing.
3407 intr_context->handler = qlge_isr;
3408 sprintf(intr_context->name, "%s-rx-%d",
3409 qdev->ndev->name, i);
3412 * Inbound queues handle unicast frames only.
3414 intr_context->handler = qlge_msix_rx_isr;
3415 sprintf(intr_context->name, "%s-rx-%d",
3416 qdev->ndev->name, i);
3421 * All rx_rings use the same intr_context since
3422 * there is only one vector.
3424 intr_context->intr = 0;
3425 intr_context->qdev = qdev;
3427 * We set up each vector's enable/disable/read bits so
3428 * there's no bit/mask calculations in the critical path.
3430 intr_context->intr_en_mask =
3431 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3432 intr_context->intr_dis_mask =
3433 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3434 INTR_EN_TYPE_DISABLE;
3435 intr_context->intr_read_mask =
3436 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3438 * Single interrupt means one handler for all rings.
3440 intr_context->handler = qlge_isr;
3441 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3442 /* Set up this vector's bit-mask that indicates
3443 * which queues it services. In this case there is
3444 * a single vector so it will service all RSS and
3445 * TX completion rings.
3447 ql_set_irq_mask(qdev, intr_context);
3449 /* Tell the TX completion rings which MSIx vector
3450 * they will be using.
3452 ql_set_tx_vect(qdev);
3455 static void ql_free_irq(struct ql_adapter *qdev)
3458 struct intr_context *intr_context = &qdev->intr_context[0];
3460 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3461 if (intr_context->hooked) {
3462 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3463 free_irq(qdev->msi_x_entry[i].vector,
3465 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3466 "freeing msix interrupt %d.\n", i);
3468 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3469 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3470 "freeing msi interrupt %d.\n", i);
3474 ql_disable_msix(qdev);
3477 static int ql_request_irq(struct ql_adapter *qdev)
3481 struct pci_dev *pdev = qdev->pdev;
3482 struct intr_context *intr_context = &qdev->intr_context[0];
3484 ql_resolve_queues_to_irqs(qdev);
3486 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3487 atomic_set(&intr_context->irq_cnt, 0);
3488 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3489 status = request_irq(qdev->msi_x_entry[i].vector,
3490 intr_context->handler,
3495 netif_err(qdev, ifup, qdev->ndev,
3496 "Failed request for MSIX interrupt %d.\n",
3500 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 "Hooked intr %d, queue type %s, with name %s.\n",
3503 qdev->rx_ring[i].type == DEFAULT_Q ?
3505 qdev->rx_ring[i].type == TX_Q ?
3507 qdev->rx_ring[i].type == RX_Q ?
3509 intr_context->name);
3512 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3513 "trying msi or legacy interrupts.\n");
3514 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3515 "%s: irq = %d.\n", __func__, pdev->irq);
3516 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517 "%s: context->name = %s.\n", __func__,
3518 intr_context->name);
3519 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3520 "%s: dev_id = 0x%p.\n", __func__,
3523 request_irq(pdev->irq, qlge_isr,
3524 test_bit(QL_MSI_ENABLED,
3526 flags) ? 0 : IRQF_SHARED,
3527 intr_context->name, &qdev->rx_ring[0]);
3531 netif_err(qdev, ifup, qdev->ndev,
3532 "Hooked intr %d, queue type %s, with name %s.\n",
3534 qdev->rx_ring[0].type == DEFAULT_Q ?
3536 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3537 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3538 intr_context->name);
3540 intr_context->hooked = 1;
3544 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3549 static int ql_start_rss(struct ql_adapter *qdev)
3551 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3552 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3553 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3554 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3555 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3556 0xbe, 0xac, 0x01, 0xfa};
3557 struct ricb *ricb = &qdev->ricb;
3560 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3562 memset((void *)ricb, 0, sizeof(*ricb));
3564 ricb->base_cq = RSS_L4K;
3566 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3567 ricb->mask = cpu_to_le16((u16)(0x3ff));
3570 * Fill out the Indirection Table.
3572 for (i = 0; i < 1024; i++)
3573 hash_id[i] = (i & (qdev->rss_ring_count - 1));
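/* e.g. with 4 RSS rings the 1024-entry table repeats 0, 1, 2, 3, ...
 * so hashed flows spread evenly across the inbound queues.
 */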
3575 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3576 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3578 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3580 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3582 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3585 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3586 "Successfully loaded RICB.\n");
3590 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3594 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597 /* Clear all the entries in the routing table. */
3598 for (i = 0; i < 16; i++) {
3599 status = ql_set_routing_reg(qdev, i, 0, 0);
3601 netif_err(qdev, ifup, qdev->ndev,
3602 "Failed to init routing register for CAM packets.\n");
3606 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3610 /* Initialize the frame-to-queue routing. */
3611 static int ql_route_initialize(struct ql_adapter *qdev)
3615 /* Clear all the entries in the routing table. */
3616 status = ql_clear_routing_entries(qdev);
3620 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3624 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3625 RT_IDX_IP_CSUM_ERR, 1);
3627 netif_err(qdev, ifup, qdev->ndev,
3628 "Failed to init routing register "
3629 "for IP CSUM error packets.\n");
3632 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3633 RT_IDX_TU_CSUM_ERR, 1);
3635 netif_err(qdev, ifup, qdev->ndev,
3636 "Failed to init routing register "
3637 "for TCP/UDP CSUM error packets.\n");
3640 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3642 netif_err(qdev, ifup, qdev->ndev,
3643 "Failed to init routing register for broadcast packets.\n");
3646 /* If we have more than one inbound queue, then turn on RSS in the
3649 if (qdev->rss_ring_count > 1) {
3650 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3651 RT_IDX_RSS_MATCH, 1);
3653 netif_err(qdev, ifup, qdev->ndev,
3654 "Failed to init routing register for MATCH RSS packets.\n");
3659 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to init routing register for CAM packets.\n");
3665 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3669 int ql_cam_route_initialize(struct ql_adapter *qdev)
3673 /* Check if the link is up and use that to
3674 * determine if we are setting or clearing
3675 * the MAC address in the CAM.
3677 set = ql_read32(qdev, STS);
3678 set &= qdev->port_link_up;
3679 status = ql_set_mac_addr(qdev, set);
3681 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3685 status = ql_route_initialize(qdev);
3687 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3692 static int ql_adapter_initialize(struct ql_adapter *qdev)
3699 * Set up the System register to halt on errors.
3701 value = SYS_EFE | SYS_FAE;
3703 ql_write32(qdev, SYS, mask | value);
3705 /* Set the default queue, and VLAN behavior. */
3706 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3707 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3708 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3710 /* Set the MPI interrupt to enabled. */
3711 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3713 /* Enable the function, set pagesize, enable error checking. */
3714 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3715 FSC_EC | FSC_VM_PAGE_4K;
3716 value |= SPLT_SETTING;
3718 /* Set/clear header splitting. */
3719 mask = FSC_VM_PAGESIZE_MASK |
3720 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3721 ql_write32(qdev, FSC, mask | value);
3723 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3725 /* Set RX packet routing to use port/pci function on which the
3726 * packet arrived on in addition to usual frame routing.
3727 * This is helpful on bonding where both interfaces can have
3728 * the same MAC address.
3730 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3731 /* Reroute all packets to our Interface.
3732 * They may have been routed to MPI firmware
3735 value = ql_read32(qdev, MGMT_RCV_CFG);
3736 value &= ~MGMT_RCV_CFG_RM;
3739 /* Sticky reg needs clearing due to WOL. */
3740 ql_write32(qdev, MGMT_RCV_CFG, mask);
3741 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3743 /* Default WOL is enabled on Mezz cards. */
3744 if (qdev->pdev->subsystem_device == 0x0068 ||
3745 qdev->pdev->subsystem_device == 0x0180)
3746 qdev->wol = WAKE_MAGIC;
3748 /* Start up the rx queues. */
3749 for (i = 0; i < qdev->rx_ring_count; i++) {
3750 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3752 netif_err(qdev, ifup, qdev->ndev,
3753 "Failed to start rx ring[%d].\n", i);
3758 /* If there is more than one inbound completion queue
3759 * then download a RICB to configure RSS.
3761 if (qdev->rss_ring_count > 1) {
3762 status = ql_start_rss(qdev);
3764 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3769 /* Start up the tx queues. */
3770 for (i = 0; i < qdev->tx_ring_count; i++) {
3771 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to start tx ring[%d].\n", i);
3779 /* Initialize the port and set the max framesize. */
3780 status = qdev->nic_ops->port_initialize(qdev);
3782 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3784 /* Set up the MAC address and frame routing filter. */
3785 status = ql_cam_route_initialize(qdev);
3787 netif_err(qdev, ifup, qdev->ndev,
3788 "Failed to init CAM/Routing tables.\n");
3792 /* Start NAPI for the RSS queues. */
3793 for (i = 0; i < qdev->rss_ring_count; i++) {
3794 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3795 "Enabling NAPI for rx_ring[%d].\n", i);
3796 napi_enable(&qdev->rx_ring[i].napi);
3802 /* Issue soft reset to chip. */
3803 static int ql_adapter_reset(struct ql_adapter *qdev)
3807 unsigned long end_jiffies;
3809 /* Clear all the entries in the routing table. */
3810 status = ql_clear_routing_entries(qdev);
3812 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3816 end_jiffies = jiffies +
3817 max((unsigned long)1, usecs_to_jiffies(30));
3819 /* Stop management traffic. */
3820 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3822 /* Wait for the NIC and MGMNT FIFOs to empty. */
3823 ql_wait_fifo_empty(qdev);
3825 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3828 value = ql_read32(qdev, RST_FO);
3829 if ((value & RST_FO_FR) == 0)
3832 } while (time_before(jiffies, end_jiffies));
3834 if (value & RST_FO_FR) {
3835 netif_err(qdev, ifdown, qdev->ndev,
3836 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3837 status = -ETIMEDOUT;
3840 /* Resume management traffic. */
3841 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3845 static void ql_display_dev_info(struct net_device *ndev)
3847 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3849 netif_info(qdev, probe, qdev->ndev,
3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3851 "XG Roll = %d, XG Rev = %d.\n",
3854 qdev->chip_rev_id & 0x0000000f,
3855 qdev->chip_rev_id >> 4 & 0x0000000f,
3856 qdev->chip_rev_id >> 8 & 0x0000000f,
3857 qdev->chip_rev_id >> 12 & 0x0000000f);
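/* REV_ID packs four 4-bit fields: NIC roll, NIC rev, XG roll and
 * XG rev, decoded by the shifts above.
 */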
3858 netif_info(qdev, probe, qdev->ndev,
3859 "MAC address %pM\n", ndev->dev_addr);
3862 static int ql_wol(struct ql_adapter *qdev)
3865 u32 wol = MB_WOL_DISABLE;
3867 /* The CAM is still intact after a reset, but if we
3868 * are doing WOL, then we may need to program the
3869 * routing regs. We would also need to issue the mailbox
3870 * commands to instruct the MPI what to do per the ethtool
3874 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3875 WAKE_MCAST | WAKE_BCAST)) {
3876 netif_err(qdev, ifdown, qdev->ndev,
3877 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3882 if (qdev->wol & WAKE_MAGIC) {
3883 status = ql_mb_wol_set_magic(qdev, 1);
3885 netif_err(qdev, ifdown, qdev->ndev,
3886 "Failed to set magic packet on %s.\n",
3890 netif_info(qdev, drv, qdev->ndev,
3891 "Enabled magic packet successfully on %s.\n",
3894 wol |= MB_WOL_MAGIC_PKT;
3898 wol |= MB_WOL_MODE_ON;
3899 status = ql_mb_wol_mode(qdev, wol);
3900 netif_err(qdev, drv, qdev->ndev,
3901 "WOL %s (wol code 0x%x) on %s\n",
3902 (status == 0) ? "Successfully set" : "Failed",
3903 wol, qdev->ndev->name);
3909 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3912 /* Don't kill the reset worker thread if we
3913 * are in the process of recovery.
3915 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3916 cancel_delayed_work_sync(&qdev->asic_reset_work);
3917 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3918 cancel_delayed_work_sync(&qdev->mpi_work);
3919 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3920 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3921 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3924 static int ql_adapter_down(struct ql_adapter *qdev)
3930 ql_cancel_all_work_sync(qdev);
3932 for (i = 0; i < qdev->rss_ring_count; i++)
3933 napi_disable(&qdev->rx_ring[i].napi);
3935 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3937 ql_disable_interrupts(qdev);
3939 ql_tx_ring_clean(qdev);
3941 /* Call netif_napi_del() from common point.
3943 for (i = 0; i < qdev->rss_ring_count; i++)
3944 netif_napi_del(&qdev->rx_ring[i].napi);
3946 status = ql_adapter_reset(qdev);
3948 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3950 ql_free_rx_buffers(qdev);
3955 static int ql_adapter_up(struct ql_adapter *qdev)
3959 err = ql_adapter_initialize(qdev);
3961 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3964 set_bit(QL_ADAPTER_UP, &qdev->flags);
3965 ql_alloc_rx_buffers(qdev);
3966 /* If the port is initialized and the
3967 * link is up then turn on the carrier.
3969 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3970 (ql_read32(qdev, STS) & qdev->port_link_up))
3972 /* Restore rx mode. */
3973 clear_bit(QL_ALLMULTI, &qdev->flags);
3974 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3975 qlge_set_multicast_list(qdev->ndev);
3977 /* Restore vlan setting. */
3978 qlge_restore_vlan(qdev);
3980 ql_enable_interrupts(qdev);
3981 ql_enable_all_completion_interrupts(qdev);
3982 netif_tx_start_all_queues(qdev->ndev);
3986 ql_adapter_reset(qdev);
3990 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3992 ql_free_mem_resources(qdev);
3996 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4000 if (ql_alloc_mem_resources(qdev)) {
4001 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4004 status = ql_request_irq(qdev);
4008 static int qlge_close(struct net_device *ndev)
4010 struct ql_adapter *qdev = netdev_priv(ndev);
4012 /* If we hit pci_channel_io_perm_failure
4013 * failure condition, then we already
4014 * brought the adapter down.
4016 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4017 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4018 clear_bit(QL_EEH_FATAL, &qdev->flags);
4023 * Wait for device to recover from a reset.
4024 * (Rarely happens, but possible.)
4026 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4028 ql_adapter_down(qdev);
4029 ql_release_adapter_resources(qdev);
4033 static int ql_configure_rings(struct ql_adapter *qdev)
4036 struct rx_ring *rx_ring;
4037 struct tx_ring *tx_ring;
4038 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4039 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4040 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4042 qdev->lbq_buf_order = get_order(lbq_buf_len);
4044 /* In a perfect world we have one RSS ring for each CPU
4045 * and each has its own vector. To do that we ask for
4046 * cpu_cnt vectors. ql_enable_msix() will adjust the
4047 * vector count to what we actually get. We then
4048 * allocate an RSS ring for each.
4049 * Essentially, we are doing min(cpu_count, msix_vector_count).
4051 qdev->intr_count = cpu_cnt;
4052 ql_enable_msix(qdev);
4053 /* Adjust the RSS ring count to the actual vector count. */
4054 qdev->rss_ring_count = qdev->intr_count;
4055 qdev->tx_ring_count = cpu_cnt;
4056 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4058 for (i = 0; i < qdev->tx_ring_count; i++) {
4059 tx_ring = &qdev->tx_ring[i];
4060 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4061 tx_ring->qdev = qdev;
4063 tx_ring->wq_len = qdev->tx_ring_size;
4065 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4068 * The completion queue ID for the tx rings start
4069 * immediately after the rss rings.
4071 tx_ring->cq_id = qdev->rss_ring_count + i;
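/* Illustration: a 4-CPU system granted all 4 requested MSI-X vectors
 * ends up with rss_ring_count = 4, tx_ring_count = 4 and
 * rx_ring_count = 8, so the TX completion queues use cq_ids 4-7.
 */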
4074 for (i = 0; i < qdev->rx_ring_count; i++) {
4075 rx_ring = &qdev->rx_ring[i];
4076 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4077 rx_ring->qdev = qdev;
4079 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4080 if (i < qdev->rss_ring_count) {
4082 * Inbound (RSS) queues.
4084 rx_ring->cq_len = qdev->rx_ring_size;
4086 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4087 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4089 rx_ring->lbq_len * sizeof(__le64);
4090 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4091 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4092 "lbq_buf_size %d, order = %d\n",
4093 rx_ring->lbq_buf_size,
4094 qdev->lbq_buf_order);
4095 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4097 rx_ring->sbq_len * sizeof(__le64);
4098 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4099 rx_ring->type = RX_Q;
4102 * Outbound queue handles outbound completions only.
4104 /* The outbound cq is the same size as the tx_ring it services. */
4105 rx_ring->cq_len = qdev->tx_ring_size;
4107 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4108 rx_ring->lbq_len = 0;
4109 rx_ring->lbq_size = 0;
4110 rx_ring->lbq_buf_size = 0;
4111 rx_ring->sbq_len = 0;
4112 rx_ring->sbq_size = 0;
4113 rx_ring->sbq_buf_size = 0;
4114 rx_ring->type = TX_Q;
4120 static int qlge_open(struct net_device *ndev)
4123 struct ql_adapter *qdev = netdev_priv(ndev);
4125 err = ql_adapter_reset(qdev);
4129 err = ql_configure_rings(qdev);
4133 err = ql_get_adapter_resources(qdev);
4137 err = ql_adapter_up(qdev);
4144 ql_release_adapter_resources(qdev);
4148 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4150 struct rx_ring *rx_ring;
4154 /* Wait for an outstanding reset to complete. */
4155 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4157 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4158 netif_err(qdev, ifup, qdev->ndev,
4159 "Waiting for adapter UP...\n");
4164 netif_err(qdev, ifup, qdev->ndev,
4165 "Timed out waiting for adapter UP\n");
4170 status = ql_adapter_down(qdev);
4174 /* Get the new rx buffer size. */
4175 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4176 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4177 qdev->lbq_buf_order = get_order(lbq_buf_len);
4179 for (i = 0; i < qdev->rss_ring_count; i++) {
4180 rx_ring = &qdev->rx_ring[i];
4181 /* Set the new size. */
4182 rx_ring->lbq_buf_size = lbq_buf_len;
4185 status = ql_adapter_up(qdev);
4191 netif_alert(qdev, ifup, qdev->ndev,
4192 "Driver up/down cycle failed, closing device.\n");
4193 set_bit(QL_ADAPTER_UP, &qdev->flags);
4194 dev_close(qdev->ndev);
4198 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4200 struct ql_adapter *qdev = netdev_priv(ndev);
4203 if (ndev->mtu == 1500 && new_mtu == 9000) {
4204 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4205 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4206 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4210 queue_delayed_work(qdev->workqueue,
4211 &qdev->mpi_port_cfg_work, 3*HZ);
4213 ndev->mtu = new_mtu;
4215 if (!netif_running(qdev->ndev)) {
4219 status = ql_change_rx_buffers(qdev);
4221 netif_err(qdev, ifup, qdev->ndev,
4222 "Changing MTU failed.\n");
4228 static struct net_device_stats *qlge_get_stats(struct net_device
4231 struct ql_adapter *qdev = netdev_priv(ndev);
4232 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4233 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4234 unsigned long pkts, mcast, dropped, errors, bytes;
4238 pkts = mcast = dropped = errors = bytes = 0;
4239 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4240 pkts += rx_ring->rx_packets;
4241 bytes += rx_ring->rx_bytes;
4242 dropped += rx_ring->rx_dropped;
4243 errors += rx_ring->rx_errors;
4244 mcast += rx_ring->rx_multicast;
4246 ndev->stats.rx_packets = pkts;
4247 ndev->stats.rx_bytes = bytes;
4248 ndev->stats.rx_dropped = dropped;
4249 ndev->stats.rx_errors = errors;
4250 ndev->stats.multicast = mcast;
4253 pkts = errors = bytes = 0;
4254 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4255 pkts += tx_ring->tx_packets;
4256 bytes += tx_ring->tx_bytes;
4257 errors += tx_ring->tx_errors;
4259 ndev->stats.tx_packets = pkts;
4260 ndev->stats.tx_bytes = bytes;
4261 ndev->stats.tx_errors = errors;
4262 return &ndev->stats;
4265 static void qlge_set_multicast_list(struct net_device *ndev)
4267 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4268 struct netdev_hw_addr *ha;
4271 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4275 * Set or clear promiscuous mode if a
4276 * transition is taking place.
4278 if (ndev->flags & IFF_PROMISC) {
4279 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4280 if (ql_set_routing_reg
4281 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4282 netif_err(qdev, hw, qdev->ndev,
4283 "Failed to set promiscous mode.\n");
4285 set_bit(QL_PROMISCUOUS, &qdev->flags);
4289 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4290 if (ql_set_routing_reg
4291 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4292 netif_err(qdev, hw, qdev->ndev,
4293 "Failed to clear promiscous mode.\n");
4295 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4301 * Set or clear all multicast mode if a
4302 * transition is taking place.
4304 if ((ndev->flags & IFF_ALLMULTI) ||
4305 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4306 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4307 if (ql_set_routing_reg
4308 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4309 netif_err(qdev, hw, qdev->ndev,
4310 "Failed to set all-multi mode.\n");
4312 set_bit(QL_ALLMULTI, &qdev->flags);
4316 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4317 if (ql_set_routing_reg
4318 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4319 netif_err(qdev, hw, qdev->ndev,
4320 "Failed to clear all-multi mode.\n");
4322 clear_bit(QL_ALLMULTI, &qdev->flags);
4327 if (!netdev_mc_empty(ndev)) {
4328 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4332 netdev_for_each_mc_addr(ha, ndev) {
4333 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4334 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4335 netif_err(qdev, hw, qdev->ndev,
4336 "Failed to loadmulticast address.\n");
4337 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4342 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4343 if (ql_set_routing_reg
4344 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4345 netif_err(qdev, hw, qdev->ndev,
4346 "Failed to set multicast match mode.\n");
4348 set_bit(QL_ALLMULTI, &qdev->flags);
4352 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4355 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4357 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4358 struct sockaddr *addr = p;
4361 if (!is_valid_ether_addr(addr->sa_data))
4362 return -EADDRNOTAVAIL;
4363 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4364 /* Update local copy of current mac address. */
4365 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4367 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4370 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4371 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4373 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4378 static void qlge_tx_timeout(struct net_device *ndev)
4380 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4381 ql_queue_asic_error(qdev);
4384 static void ql_asic_reset_work(struct work_struct *work)
4386 struct ql_adapter *qdev =
4387 container_of(work, struct ql_adapter, asic_reset_work.work);
4390 status = ql_adapter_down(qdev);
4394 status = ql_adapter_up(qdev);
4398 /* Restore rx mode. */
4399 clear_bit(QL_ALLMULTI, &qdev->flags);
4400 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4401 qlge_set_multicast_list(qdev->ndev);
4406 netif_alert(qdev, ifup, qdev->ndev,
4407 "Driver up/down cycle failed, closing device\n");
4409 set_bit(QL_ADAPTER_UP, &qdev->flags);
4410 dev_close(qdev->ndev);
4414 static struct nic_operations qla8012_nic_ops = {
4415 .get_flash = ql_get_8012_flash_params,
4416 .port_initialize = ql_8012_port_initialize,
4419 static struct nic_operations qla8000_nic_ops = {
4420 .get_flash = ql_get_8000_flash_params,
4421 .port_initialize = ql_8000_port_initialize,
4424 /* Find the pcie function number for the other NIC
4425 * on this chip. Since both NIC functions share a
4426 * common firmware we have the lowest enabled function
4427 * do any common work. Examples would be resetting
4428 * after a fatal firmware error, or doing a firmware
4431 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4435 u32 nic_func1, nic_func2;
4437 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4442 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4443 MPI_TEST_NIC_FUNC_MASK);
4444 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4445 MPI_TEST_NIC_FUNC_MASK);
4447 if (qdev->func == nic_func1)
4448 qdev->alt_func = nic_func2;
4449 else if (qdev->func == nic_func2)
4450 qdev->alt_func = nic_func1;
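/* Worked example (register values assumed for illustration): if the MPI
 * test register reports nic_func1 == 0 and nic_func2 == 2, then a device
 * probed as PCI function 2 records alt_func = 0 and, being the
 * higher-numbered function, is assigned port 1 in ql_get_board_info()
 * below.
 */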
4457 static int ql_get_board_info(struct ql_adapter *qdev)
4461 qdev->func = (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4465 status = ql_get_alt_pcie_func(qdev);
4469 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4471 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4472 qdev->port_link_up = STS_PL1;
4473 qdev->port_init = STS_PI1;
4474 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4475 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4477 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4478 qdev->port_link_up = STS_PL0;
4479 qdev->port_init = STS_PI0;
4480 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4481 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4483 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4484 qdev->device_id = qdev->pdev->device;
4485 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4486 qdev->nic_ops = &qla8012_nic_ops;
4487 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4488 qdev->nic_ops = &qla8000_nic_ops;
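/* Undo everything ql_init_device() set up: destroy the workqueue, unmap
 * the register and doorbell BARs, free the MPI coredump buffer and
 * release the PCI regions.
 */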
4492 static void ql_release_all(struct pci_dev *pdev)
4494 struct net_device *ndev = pci_get_drvdata(pdev);
4495 struct ql_adapter *qdev = netdev_priv(ndev);
4497 if (qdev->workqueue) {
4498 destroy_workqueue(qdev->workqueue);
4499 qdev->workqueue = NULL;
4503 iounmap(qdev->reg_base);
4504 if (qdev->doorbell_area)
4505 iounmap(qdev->doorbell_area);
4506 vfree(qdev->mpi_coredump);
4507 pci_release_regions(pdev);
4508 pci_set_drvdata(pdev, NULL);
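/* One-time PCI and adapter setup: enable the device, set the PCIe read
 * request size, request the PCI regions, select a 64-bit DMA mask with a
 * 32-bit fallback, map the control register and doorbell BARs, read the
 * board/port information from hardware, and initialize the workqueue and
 * delayed work items used for resets and MPI firmware events.
 */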
4511 static int __devinit ql_init_device(struct pci_dev *pdev,
4512 struct net_device *ndev, int cards_found)
4514 struct ql_adapter *qdev = netdev_priv(ndev);
4517 memset((void *)qdev, 0, sizeof(*qdev));
4518 err = pci_enable_device(pdev);
4520 dev_err(&pdev->dev, "PCI device enable failed.\n");
4526 pci_set_drvdata(pdev, ndev);
4528 /* Set PCIe read request size */
4529 err = pcie_set_readrq(pdev, 4096);
4531 dev_err(&pdev->dev, "Set readrq failed.\n");
4535 err = pci_request_regions(pdev, DRV_NAME);
4537 dev_err(&pdev->dev, "PCI region request failed.\n");
4541 pci_set_master(pdev);
4542 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4543 set_bit(QL_DMA64, &qdev->flags);
4544 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4546 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4548 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4552 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4556 /* Set PCIe reset type for EEH to fundamental. */
4557 pdev->needs_freset = 1;
4558 pci_save_state(pdev);
4560 qdev->reg_base = ioremap_nocache(pci_resource_start(pdev, 1),
4561 pci_resource_len(pdev, 1));
4562 if (!qdev->reg_base) {
4563 dev_err(&pdev->dev, "Register mapping failed.\n");
4568 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4569 qdev->doorbell_area =
4570 ioremap_nocache(pci_resource_start(pdev, 3),
4571 pci_resource_len(pdev, 3));
4572 if (!qdev->doorbell_area) {
4573 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4578 err = ql_get_board_info(qdev);
4580 dev_err(&pdev->dev, "Register access failed.\n");
4584 qdev->msg_enable = netif_msg_init(debug, default_msg);
4585 spin_lock_init(&qdev->hw_lock);
4586 spin_lock_init(&qdev->stats_lock);
4588 if (qlge_mpi_coredump) {
4589 qdev->mpi_coredump =
4590 vmalloc(sizeof(struct ql_mpi_coredump));
4591 if (qdev->mpi_coredump == NULL) {
4592 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4596 if (qlge_force_coredump)
4597 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4599 /* make sure the EEPROM is good */
4600 err = qdev->nic_ops->get_flash(qdev);
4602 dev_err(&pdev->dev, "Invalid FLASH.\n");
4606 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4607 /* Keep local copy of current mac address. */
4608 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4610 /* Set up the default ring sizes. */
4611 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4612 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4614 /* Set up the coalescing parameters. */
4615 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4616 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4617 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4618 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4621 * Set up the operating parameters.
4624 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4625 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4626 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4627 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4628 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4629 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4630 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4631 init_completion(&qdev->ide_completion);
4634 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4635 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4636 DRV_NAME, DRV_VERSION);
4640 ql_release_all(pdev);
4642 pci_disable_device(pdev);
4646 static const struct net_device_ops qlge_netdev_ops = {
4647 .ndo_open = qlge_open,
4648 .ndo_stop = qlge_close,
4649 .ndo_start_xmit = qlge_send,
4650 .ndo_change_mtu = qlge_change_mtu,
4651 .ndo_get_stats = qlge_get_stats,
4652 .ndo_set_multicast_list = qlge_set_multicast_list,
4653 .ndo_set_mac_address = qlge_set_mac_address,
4654 .ndo_validate_addr = eth_validate_addr,
4655 .ndo_tx_timeout = qlge_tx_timeout,
4656 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4657 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4658 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
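/* Periodic 5 second timer: read a status register so that EEH can notice
 * a dead PCI bus, and log if the channel has already gone offline.
 */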
4661 static void ql_timer(unsigned long data)
4663 struct ql_adapter *qdev = (struct ql_adapter *)data;
4666 var = ql_read32(qdev, STS);
4667 if (pci_channel_offline(qdev->pdev)) {
4668 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4672 mod_timer(&qdev->timer, jiffies + (5*HZ));
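/* Probe one adapter: allocate a multiqueue net_device sized to the number
 * of online CPUs (capped at MAX_CPUS), run ql_init_device(), set up the
 * net_device callbacks, ethtool ops and feature flags, register the
 * netdev, and start the EEH watchdog timer.
 */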
4675 static int __devinit qlge_probe(struct pci_dev *pdev,
4676 const struct pci_device_id *pci_entry)
4678 struct net_device *ndev = NULL;
4679 struct ql_adapter *qdev = NULL;
4680 static int cards_found = 0;
4683 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4684 min(MAX_CPUS, (int)num_online_cpus()));
4688 err = ql_init_device(pdev, ndev, cards_found);
4694 qdev = netdev_priv(ndev);
4695 SET_NETDEV_DEV(ndev, &pdev->dev);
4702 | NETIF_F_HW_VLAN_TX
4703 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4704 ndev->features |= NETIF_F_GRO;
4706 if (test_bit(QL_DMA64, &qdev->flags))
4707 ndev->features |= NETIF_F_HIGHDMA;
4710 * Set up net_device structure.
4712 ndev->tx_queue_len = qdev->tx_ring_size;
4713 ndev->irq = pdev->irq;
4715 ndev->netdev_ops = &qlge_netdev_ops;
4716 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4717 ndev->watchdog_timeo = 10 * HZ;
4719 err = register_netdev(ndev);
4721 dev_err(&pdev->dev, "net device registration failed.\n");
4722 ql_release_all(pdev);
4723 pci_disable_device(pdev);
4726 /* Start up the timer to trigger EEH if the bus goes dead. */
4729 init_timer_deferrable(&qdev->timer);
4730 qdev->timer.data = (unsigned long)qdev;
4731 qdev->timer.function = ql_timer;
4732 qdev->timer.expires = jiffies + (5*HZ);
4733 add_timer(&qdev->timer);
4735 ql_display_dev_info(ndev);
4736 atomic_set(&qdev->lb_count, 0);
4741 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4743 return qlge_send(skb, ndev);
4746 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4748 return ql_clean_inbound_rx_ring(rx_ring, budget);
4751 static void __devexit qlge_remove(struct pci_dev *pdev)
4753 struct net_device *ndev = pci_get_drvdata(pdev);
4754 struct ql_adapter *qdev = netdev_priv(ndev);
4755 del_timer_sync(&qdev->timer);
4756 ql_cancel_all_work_sync(qdev);
4757 unregister_netdev(ndev);
4758 ql_release_all(pdev);
4759 pci_disable_device(pdev);
4763 /* Clean up resources without touching hardware. */
4764 static void ql_eeh_close(struct net_device *ndev)
4767 struct ql_adapter *qdev = netdev_priv(ndev);
4769 if (netif_carrier_ok(ndev)) {
4770 netif_carrier_off(ndev);
4771 netif_stop_queue(ndev);
4774 /* Disable the timer. */
4775 del_timer_sync(&qdev->timer);
4776 ql_cancel_all_work_sync(qdev);
4778 for (i = 0; i < qdev->rss_ring_count; i++)
4779 netif_napi_del(&qdev->rx_ring[i].napi);
4781 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4782 ql_tx_ring_clean(qdev);
4783 ql_free_rx_buffers(qdev);
4784 ql_release_adapter_resources(qdev);
4788 * This callback is called by the PCI subsystem whenever
4789 * a PCI bus error is detected.
4791 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4792 enum pci_channel_state state)
4794 struct net_device *ndev = pci_get_drvdata(pdev);
4795 struct ql_adapter *qdev = netdev_priv(ndev);
4798 case pci_channel_io_normal:
4799 return PCI_ERS_RESULT_CAN_RECOVER;
4800 case pci_channel_io_frozen:
4801 netif_device_detach(ndev);
4802 if (netif_running(ndev))
4804 pci_disable_device(pdev);
4805 return PCI_ERS_RESULT_NEED_RESET;
4806 case pci_channel_io_perm_failure:
4808 "%s: pci_channel_io_perm_failure.\n", __func__);
4810 set_bit(QL_EEH_FATAL, &qdev->flags);
4811 return PCI_ERS_RESULT_DISCONNECT;
4814 /* Request a slot reset. */
4815 return PCI_ERS_RESULT_NEED_RESET;
4819 * This callback is called after the PCI bus has been reset.
4820 * Basically, this tries to restart the card from scratch.
4821 * This is a shortened version of the device probe/discovery code,
4822 * it resembles the first half of the probe routine.
4824 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4826 struct net_device *ndev = pci_get_drvdata(pdev);
4827 struct ql_adapter *qdev = netdev_priv(ndev);
4829 pdev->error_state = pci_channel_io_normal;
4831 pci_restore_state(pdev);
4832 if (pci_enable_device(pdev)) {
4833 netif_err(qdev, ifup, qdev->ndev,
4834 "Cannot re-enable PCI device after reset.\n");
4835 return PCI_ERS_RESULT_DISCONNECT;
4837 pci_set_master(pdev);
4839 if (ql_adapter_reset(qdev)) {
4840 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4841 set_bit(QL_EEH_FATAL, &qdev->flags);
4842 return PCI_ERS_RESULT_DISCONNECT;
4845 return PCI_ERS_RESULT_RECOVERED;
4848 static void qlge_io_resume(struct pci_dev *pdev)
4850 struct net_device *ndev = pci_get_drvdata(pdev);
4851 struct ql_adapter *qdev = netdev_priv(ndev);
4854 if (netif_running(ndev)) {
4855 err = qlge_open(ndev);
4857 netif_err(qdev, ifup, qdev->ndev,
4858 "Device initialization failed after reset.\n");
4862 netif_err(qdev, ifup, qdev->ndev,
4863 "Device was not running prior to EEH.\n");
4865 mod_timer(&qdev->timer, jiffies + (5*HZ));
4866 netif_device_attach(ndev);
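/* EEH recovery runs error_detected -> slot_reset -> resume: the net
 * device is detached and the PCI function disabled when the channel
 * freezes, the function is re-enabled and the ASIC reset on slot reset,
 * and the interface is reopened and the watchdog timer re-armed on
 * resume.
 */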
4869 static struct pci_error_handlers qlge_err_handler = {
4870 .error_detected = qlge_io_error_detected,
4871 .slot_reset = qlge_io_slot_reset,
4872 .resume = qlge_io_resume,
4875 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4877 struct net_device *ndev = pci_get_drvdata(pdev);
4878 struct ql_adapter *qdev = netdev_priv(ndev);
4881 netif_device_detach(ndev);
4882 del_timer_sync(&qdev->timer);
4884 if (netif_running(ndev)) {
4885 err = ql_adapter_down(qdev);
4891 err = pci_save_state(pdev);
4895 pci_disable_device(pdev);
4897 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4903 static int qlge_resume(struct pci_dev *pdev)
4905 struct net_device *ndev = pci_get_drvdata(pdev);
4906 struct ql_adapter *qdev = netdev_priv(ndev);
4909 pci_set_power_state(pdev, PCI_D0);
4910 pci_restore_state(pdev);
4911 err = pci_enable_device(pdev);
4913 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4916 pci_set_master(pdev);
4918 pci_enable_wake(pdev, PCI_D3hot, 0);
4919 pci_enable_wake(pdev, PCI_D3cold, 0);
4921 if (netif_running(ndev)) {
4922 err = ql_adapter_up(qdev);
4927 mod_timer(&qdev->timer, jiffies + (5*HZ));
4928 netif_device_attach(ndev);
4932 #endif /* CONFIG_PM */
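/* Shutdown reuses the suspend path to quiesce the adapter. */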
4934 static void qlge_shutdown(struct pci_dev *pdev)
4936 qlge_suspend(pdev, PMSG_SUSPEND);
4939 static struct pci_driver qlge_driver = {
4941 .id_table = qlge_pci_tbl,
4942 .probe = qlge_probe,
4943 .remove = __devexit_p(qlge_remove),
4945 .suspend = qlge_suspend,
4946 .resume = qlge_resume,
4948 .shutdown = qlge_shutdown,
4949 .err_handler = &qlge_err_handler
4952 static int __init qlge_init_module(void)
4954 return pci_register_driver(&qlge_driver);
4957 static void __exit qlge_exit(void)
4959 pci_unregister_driver(&qlge_driver);
4962 module_init(qlge_init_module);
4963 module_exit(qlge_exit);