1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/if_vlan.h>
39 #include <linux/delay.h>
41 #include <linux/vmalloc.h>
42 #include <net/ip6_checksum.h>
44 #include "qlge.h"
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER | */
62 NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
63 /* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 static int debug = 0x00007fff; /* defaults above */
67 module_param(debug, int, 0);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
73 static int irq_type = MSIX_IRQ;
74 module_param(irq_type, int, 0);
75 MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
79 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
80 /* required last entry */
81 {0,},
82 };
84 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
86 /* This hardware semaphore causes exclusive access to
87 * resources shared between the NIC driver, MPI firmware,
88 * FCOE firmware and the FC driver.
89 */
90 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
96 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
99 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
102 sem_bits = SEM_SET << SEM_ICB_SHIFT;
104 case SEM_MAC_ADDR_MASK:
105 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
108 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
111 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
113 case SEM_RT_IDX_MASK:
114 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
116 case SEM_PROC_REG_MASK:
117 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
120 QPRINTK(qdev, PROBE, ALERT, "Bad semaphore mask!\n");
124 ql_write32(qdev, SEM, sem_bits | sem_mask);
125 return !(ql_read32(qdev, SEM) & sem_bits);
128 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
130 unsigned int seconds = 3;
132 if (!ql_sem_trylock(qdev, sem_mask))
139 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
141 ql_write32(qdev, SEM, sem_mask);
142 ql_read32(qdev, SEM); /* flush */
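
/* Illustrative usage sketch (added for clarity; not part of the original
 * driver). Code that touches a resource shared with the MPI/FCOE firmware
 * brackets its register access with the semaphore helpers above; as
 * ql_get_flash_params() below shows, a nonzero return means the lock was
 * not acquired. The function name here is hypothetical.
 */
static int ql_example_locked_flash_access(struct ql_adapter *qdev)
{
	/* Retries for up to ~3 seconds before giving up. */
	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;
	/* ... FLASH_ADDR/FLASH_DATA accesses go here ... */
	ql_sem_unlock(qdev, SEM_FLASH_MASK);	/* unlock also flushes */
	return 0;
}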
145 /* This function waits for a specific bit to come ready
146 * in a given register. It is used mostly by the initialization
147 * process, but is also used in kernel thread contexts such as
148 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
149 */
150 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
153 int count = UDELAY_COUNT;
156 temp = ql_read32(qdev, reg);
158 /* check for errors */
159 if (temp & err_bit) {
160 QPRINTK(qdev, PROBE, ALERT,
161 "register 0x%.08x access error, value = 0x%.08x!.\n",
164 } else if (temp & bit)
166 udelay(UDELAY_DELAY);
169 QPRINTK(qdev, PROBE, ALERT,
170 "Timed out waiting for reg %x to come ready.\n", reg);
174 /* The CFG register is used to download TX and RX control blocks
175 * to the chip. This function waits for an operation to complete.
177 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
179 int count = UDELAY_COUNT;
183 temp = ql_read32(qdev, CFG);
188 udelay(UDELAY_DELAY);
195 /* Used to issue init control blocks to hw. Maps control block,
196 * sets address, triggers download, waits for completion.
198 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
208 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
211 map = pci_map_single(qdev->pdev, ptr, size, direction);
212 if (pci_dma_mapping_error(qdev->pdev, map)) {
213 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
217 status = ql_wait_cfg(qdev, bit);
219 QPRINTK(qdev, IFUP, ERR,
220 "Timed out waiting for CFG to come ready.\n");
224 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
231 mask = CFG_Q_MASK | (bit << 16);
232 value = bit | (q_id << CFG_Q_SHIFT);
233 ql_write32(qdev, CFG, (mask | value));
236 * Wait for the bit to clear after signaling hw.
238 status = ql_wait_cfg(qdev, bit);
240 pci_unmap_single(qdev->pdev, map, size, direction);
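
/* Hedged usage sketch (hypothetical caller): downloading a completion
 * queue init control block with ql_write_cfg(). CFG_LCQ is the "load
 * completion queue" bit used above; the cqicb member of rx_ring is an
 * assumption about the surrounding driver, not shown in this file.
 */
static int ql_example_load_cq_icb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring)
{
	/* Maps the block, triggers the download, waits for completion. */
	return ql_write_cfg(qdev, &rx_ring->cqicb, sizeof(rx_ring->cqicb),
			    CFG_LCQ, rx_ring->cq_id);
}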
244 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
251 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
255 case MAC_ADDR_TYPE_MULTI_MAC:
256 case MAC_ADDR_TYPE_CAM_MAC:
259 ql_wait_reg_rdy(qdev,
260 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
263 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
264 (index << MAC_ADDR_IDX_SHIFT) | /* index */
265 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
267 ql_wait_reg_rdy(qdev,
268 MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
271 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 if (type == MAC_ADDR_TYPE_CAM_MAC) {
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
296 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
297 MAC_ADDR_MR, MAC_ADDR_E);
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
304 case MAC_ADDR_TYPE_VLAN:
305 case MAC_ADDR_TYPE_MULTI_FLTR:
307 QPRINTK(qdev, IFUP, CRIT,
308 "Address type %d not yet supported.\n", type);
312 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
316 /* Set up a MAC, multicast or VLAN address for the
317 * inbound frame matching.
319 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
325 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
329 case MAC_ADDR_TYPE_MULTI_MAC:
330 case MAC_ADDR_TYPE_CAM_MAC:
333 u32 upper = (addr[0] << 8) | addr[1];
334 u32 lower =
335 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
336 (addr[5]);
338 QPRINTK(qdev, IFUP, INFO,
339 "Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
340 " at index %d in the CAM.\n",
342 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
343 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
344 addr[4], addr[5], index);
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
352 (index << MAC_ADDR_IDX_SHIFT) | /* index */
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
361 (index << MAC_ADDR_IDX_SHIFT) | /* index */
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
369 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
370 (index << MAC_ADDR_IDX_SHIFT) | /* index */
372 /* This field should also include the queue id
373 and possibly the function id. Right now we hardcode
374 the route field to NIC core.
376 if (type == MAC_ADDR_TYPE_CAM_MAC) {
377 cam_output = (CAM_OUT_ROUTE_NIC |
379 func << CAM_OUT_FUNC_SHIFT) |
381 rss_ring_first_cq_id <<
382 CAM_OUT_CQ_ID_SHIFT));
384 cam_output |= CAM_OUT_RV;
385 /* route to NIC core */
386 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
390 case MAC_ADDR_TYPE_VLAN:
392 u32 enable_bit = *((u32 *) &addr[0]);
393 /* For VLAN, the addr actually holds a bit that
394 * either enables or disables the vlan id we are
395 * addressing. It's either MAC_ADDR_E on or off.
396 * That's bit-27 we're talking about.
398 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
399 (enable_bit ? "Adding" : "Removing"),
400 index, (enable_bit ? "to" : "from"));
403 ql_wait_reg_rdy(qdev,
404 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
407 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
408 (index << MAC_ADDR_IDX_SHIFT) | /* index */
410 enable_bit); /* enable/disable */
413 case MAC_ADDR_TYPE_MULTI_FLTR:
415 QPRINTK(qdev, IFUP, CRIT,
416 "Address type %d not yet supported.\n", type);
420 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
424 /* Get a specific frame routing value from the CAM.
425 * Used for debug and reg dump.
427 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
431 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
435 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
439 ql_write32(qdev, RT_IDX,
440 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
441 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
444 *value = ql_read32(qdev, RT_DATA);
446 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
450 /* The NIC function for this chip has 16 routing indexes. Each one can be used
451 * to route different frame types to various inbound queues. We send broadcast/
452 * multicast/error frames to the default queue for slow handling,
453 * and CAM hit/RSS frames to the fast handling queues.
455 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
461 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
465 QPRINTK(qdev, IFUP, DEBUG,
466 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
467 (enable ? "Adding" : "Removing"),
468 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
469 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
471 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
472 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
473 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
474 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
475 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
476 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
477 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
478 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
479 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
480 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
481 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
482 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
483 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
484 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
485 (enable ? "to" : "from"));
490 value = RT_IDX_DST_CAM_Q | /* dest */
491 RT_IDX_TYPE_NICQ | /* type */
492 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
495 case RT_IDX_VALID: /* Promiscuous Mode frames. */
497 value = RT_IDX_DST_DFLT_Q | /* dest */
498 RT_IDX_TYPE_NICQ | /* type */
499 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
502 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
504 value = RT_IDX_DST_DFLT_Q | /* dest */
505 RT_IDX_TYPE_NICQ | /* type */
506 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
509 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
511 value = RT_IDX_DST_DFLT_Q | /* dest */
512 RT_IDX_TYPE_NICQ | /* type */
513 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
516 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
518 value = RT_IDX_DST_CAM_Q | /* dest */
519 RT_IDX_TYPE_NICQ | /* type */
520 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
523 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
525 value = RT_IDX_DST_CAM_Q | /* dest */
526 RT_IDX_TYPE_NICQ | /* type */
527 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
530 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
532 value = RT_IDX_DST_RSS | /* dest */
533 RT_IDX_TYPE_NICQ | /* type */
534 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
537 case 0: /* Clear the E-bit on an entry. */
539 value = RT_IDX_DST_DFLT_Q | /* dest */
540 RT_IDX_TYPE_NICQ | /* type */
541 (index << RT_IDX_IDX_SHIFT);/* index */
545 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
552 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
555 value |= (enable ? RT_IDX_E : 0);
556 ql_write32(qdev, RT_IDX, value);
557 ql_write32(qdev, RT_DATA, enable ? mask : 0);
560 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
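
/* Minimal sketch (hypothetical caller): route broadcast frames to the
 * default queue by enabling the broadcast slot. index picks one of the
 * 16 slots, mask selects the frame type, enable sets the slot's E-bit.
 */
static int ql_example_route_bcast(struct ql_adapter *qdev)
{
	return ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
}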
564 static void ql_enable_interrupts(struct ql_adapter *qdev)
566 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
569 static void ql_disable_interrupts(struct ql_adapter *qdev)
571 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
574 /* If we're running with multiple MSI-X vectors then we enable on the fly.
575 * Otherwise, we may have multiple outstanding workers and don't want to
576 * enable until the last one finishes. In this case, the irq_cnt gets
577 * incremented every time we queue a worker and decremented every time
578 * a worker finishes. Once it hits zero we enable the interrupt.
580 void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
582 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
583 ql_write32(qdev, INTR_EN,
584 qdev->intr_context[intr].intr_en_mask);
586 if (qdev->legacy_check)
587 spin_lock(&qdev->legacy_lock);
588 if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
589 QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
591 ql_write32(qdev, INTR_EN,
592 qdev->intr_context[intr].intr_en_mask);
594 QPRINTK(qdev, INTR, ERR,
595 "Skip enable, other queue(s) are active.\n");
597 if (qdev->legacy_check)
598 spin_unlock(&qdev->legacy_lock);
602 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
606 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
608 else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
609 ql_write32(qdev, INTR_EN,
610 qdev->intr_context[intr].intr_dis_mask);
611 var = ql_read32(qdev, STS);
613 atomic_inc(&qdev->intr_context[intr].irq_cnt);
618 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
621 for (i = 0; i < qdev->intr_count; i++) {
622 /* The enable call does an atomic_dec_and_test
623 * and enables only if the result is zero.
624 * So we precharge it here.
626 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
627 ql_enable_completion_interrupt(qdev, i);
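
/* Hedged sketch of the non-MSI-X bookkeeping described above: queueing a
 * worker bumps irq_cnt (via ql_disable_completion_interrupt), and the
 * interrupt is re-armed only when the last outstanding worker finishes.
 * Hypothetical worker body:
 */
static void ql_example_completion_worker(struct ql_adapter *qdev, u32 intr)
{
	/* ... service the completion queue for this vector ... */

	/* Decrements irq_cnt; re-enables only if it reaches zero. */
	ql_enable_completion_interrupt(qdev, intr);
}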
632 int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
635 /* wait for reg to come ready */
636 status = ql_wait_reg_rdy(qdev,
637 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
640 /* set up for reg read */
641 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
642 /* wait for reg to come ready */
643 status = ql_wait_reg_rdy(qdev,
644 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
648 *data = ql_read32(qdev, FLASH_DATA);
653 static int ql_get_flash_params(struct ql_adapter *qdev)
657 u32 *p = (u32 *)&qdev->flash;
659 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
662 for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
663 status = ql_read_flash_word(qdev, i, p);
665 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
671 ql_sem_unlock(qdev, SEM_FLASH_MASK);
675 /* xgmac registers are located behind the xgmac_addr and xgmac_data
676 * register pair. Each read/write requires us to wait for the ready
677 * bit before reading/writing the data.
679 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
682 /* wait for reg to come ready */
683 status = ql_wait_reg_rdy(qdev,
684 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
687 /* write the data to the data reg */
688 ql_write32(qdev, XGMAC_DATA, data);
689 /* trigger the write */
690 ql_write32(qdev, XGMAC_ADDR, reg);
694 /* xgmac registers are located behind the xgmac_addr and xgmac_data
695 * register pair. Each read/write requires us to wait for the ready
696 * bit before reading/writing the data.
698 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
701 /* wait for reg to come ready */
702 status = ql_wait_reg_rdy(qdev,
703 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
706 /* set up for reg read */
707 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
708 /* wait for reg to come ready */
709 status = ql_wait_reg_rdy(qdev,
710 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
714 *data = ql_read32(qdev, XGMAC_DATA);
719 /* This is used for reading the 64-bit statistics regs. */
720 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
726 status = ql_read_xgmac_reg(qdev, reg, &lo);
730 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
734 *data = (u64) lo | ((u64) hi << 32);
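
/* Illustrative sketch of the read-modify-write shape the indirect XGMAC
 * access pair implies; ql_port_initialize() below uses this same pattern.
 * The helper name is hypothetical.
 */
static int ql_example_xgmac_set_bits(struct ql_adapter *qdev, u32 reg,
				     u32 set_bits)
{
	u32 data;
	int status;

	status = ql_read_xgmac_reg(qdev, reg, &data);
	if (status)
		return status;
	data |= set_bits;
	return ql_write_xgmac_reg(qdev, reg, data);
}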
740 /* Take the MAC Core out of reset.
741 * Enable statistics counting.
742 * Take the transmitter/receiver out of reset.
743 * This functionality may be done in the MPI firmware at a later date.
744 */
746 static int ql_port_initialize(struct ql_adapter *qdev)
751 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
752 /* Another function has the semaphore, so
753 * wait for the port init bit to come ready.
755 QPRINTK(qdev, LINK, INFO,
756 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
757 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
759 QPRINTK(qdev, LINK, CRIT,
760 "Port initialize timed out.\n");
765 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore.\n");
766 /* Set the core reset. */
767 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
770 data |= GLOBAL_CFG_RESET;
771 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
775 /* Clear the core reset and turn on jumbo for receiver. */
776 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
777 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
778 data |= GLOBAL_CFG_TX_STAT_EN;
779 data |= GLOBAL_CFG_RX_STAT_EN;
780 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
784 /* Enable the transmitter and clear its reset. */
785 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
788 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
789 data |= TX_CFG_EN; /* Enable the transmitter. */
790 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
794 /* Enable the receiver and clear its reset. */
795 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
798 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
799 data |= RX_CFG_EN; /* Enable the receiver. */
800 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
806 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
810 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
814 /* Signal to the world that the port is enabled. */
815 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
817 ql_sem_unlock(qdev, qdev->xg_sem_mask);
821 /* Get the next large buffer. */
822 struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
824 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
825 rx_ring->lbq_curr_idx++;
826 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
827 rx_ring->lbq_curr_idx = 0;
828 rx_ring->lbq_free_cnt++;
832 /* Get the next small buffer. */
833 struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
835 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
836 rx_ring->sbq_curr_idx++;
837 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
838 rx_ring->sbq_curr_idx = 0;
839 rx_ring->sbq_free_cnt++;
843 /* Update an rx ring index. */
844 static void ql_update_cq(struct rx_ring *rx_ring)
846 rx_ring->cnsmr_idx++;
847 rx_ring->curr_entry++;
848 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
849 rx_ring->cnsmr_idx = 0;
850 rx_ring->curr_entry = rx_ring->cq_base;
854 static void ql_write_cq_idx(struct rx_ring *rx_ring)
856 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
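
/* Hedged sketch (hypothetical helper): a service loop advances the
 * software consumer index once per IOCB with ql_update_cq(), then
 * publishes it to the chip once via the doorbell, exactly as the
 * completion handlers below do.
 */
static void ql_example_drain_cq(struct rx_ring *rx_ring, int entries)
{
	while (entries--)
		ql_update_cq(rx_ring);	/* wraps at cq_len */
	ql_write_cq_idx(rx_ring);	/* single doorbell write */
}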
859 /* Process (refill) a large buffer queue. */
860 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
862 int clean_idx = rx_ring->lbq_clean_idx;
863 struct bq_desc *lbq_desc;
864 struct bq_element *bq;
868 while (rx_ring->lbq_free_cnt > 16) {
869 for (i = 0; i < 16; i++) {
870 QPRINTK(qdev, RX_STATUS, DEBUG,
871 "lbq: try cleaning clean_idx = %d.\n",
873 lbq_desc = &rx_ring->lbq[clean_idx];
874 bq = lbq_desc->bq;
875 if (lbq_desc->p.lbq_page == NULL) {
876 QPRINTK(qdev, RX_STATUS, DEBUG,
877 "lbq: getting new page for index %d.\n",
879 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
880 if (lbq_desc->p.lbq_page == NULL) {
881 QPRINTK(qdev, RX_STATUS, ERR,
882 "Couldn't get a page.\n");
885 map = pci_map_page(qdev->pdev,
886 lbq_desc->p.lbq_page,
889 if (pci_dma_mapping_error(qdev->pdev, map)) {
890 QPRINTK(qdev, RX_STATUS, ERR,
891 "PCI mapping failed.\n");
894 pci_unmap_addr_set(lbq_desc, mapaddr, map);
895 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
896 bq->addr_lo = cpu_to_le32(map);
898 bq->addr_hi = cpu_to_le32(map >> 32);
900 }
901 clean_idx++;
902 if (clean_idx == rx_ring->lbq_len)
906 rx_ring->lbq_clean_idx = clean_idx;
907 rx_ring->lbq_prod_idx += 16;
908 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
909 rx_ring->lbq_prod_idx = 0;
910 QPRINTK(qdev, RX_STATUS, DEBUG,
911 "lbq: updating prod idx = %d.\n",
912 rx_ring->lbq_prod_idx);
913 ql_write_db_reg(rx_ring->lbq_prod_idx,
914 rx_ring->lbq_prod_idx_db_reg);
915 rx_ring->lbq_free_cnt -= 16;
919 /* Process (refill) a small buffer queue. */
920 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
922 int clean_idx = rx_ring->sbq_clean_idx;
923 struct bq_desc *sbq_desc;
924 struct bq_element *bq;
928 while (rx_ring->sbq_free_cnt > 16) {
929 for (i = 0; i < 16; i++) {
930 sbq_desc = &rx_ring->sbq[clean_idx];
931 QPRINTK(qdev, RX_STATUS, DEBUG,
932 "sbq: try cleaning clean_idx = %d.\n",
934 bq = sbq_desc->bq;
935 if (sbq_desc->p.skb == NULL) {
936 QPRINTK(qdev, RX_STATUS, DEBUG,
937 "sbq: getting new skb for index %d.\n",
939 sbq_desc->p.skb =
940 netdev_alloc_skb(qdev->ndev,
941 rx_ring->sbq_buf_size);
942 if (sbq_desc->p.skb == NULL) {
943 QPRINTK(qdev, PROBE, ERR,
944 "Couldn't get an skb.\n");
945 rx_ring->sbq_clean_idx = clean_idx;
948 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
949 map = pci_map_single(qdev->pdev,
950 sbq_desc->p.skb->data,
951 rx_ring->sbq_buf_size /
952 2, PCI_DMA_FROMDEVICE);
953 pci_unmap_addr_set(sbq_desc, mapaddr, map);
954 pci_unmap_len_set(sbq_desc, maplen,
955 rx_ring->sbq_buf_size / 2);
956 bq->addr_lo = cpu_to_le32(map);
957 bq->addr_hi = cpu_to_le32(map >> 32);
958 }
959 clean_idx++;
961 if (clean_idx == rx_ring->sbq_len)
964 rx_ring->sbq_clean_idx = clean_idx;
965 rx_ring->sbq_prod_idx += 16;
966 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
967 rx_ring->sbq_prod_idx = 0;
968 QPRINTK(qdev, RX_STATUS, DEBUG,
969 "sbq: updating prod idx = %d.\n",
970 rx_ring->sbq_prod_idx);
971 ql_write_db_reg(rx_ring->sbq_prod_idx,
972 rx_ring->sbq_prod_idx_db_reg);
974 rx_ring->sbq_free_cnt -= 16;
978 static void ql_update_buffer_queues(struct ql_adapter *qdev,
979 struct rx_ring *rx_ring)
981 ql_update_sbq(qdev, rx_ring);
982 ql_update_lbq(qdev, rx_ring);
985 /* Unmaps tx buffers. Can be called from send() if a pci mapping
986 * fails at some stage, or from the interrupt when a tx completes.
988 static void ql_unmap_send(struct ql_adapter *qdev,
989 struct tx_ring_desc *tx_ring_desc, int mapped)
992 for (i = 0; i < mapped; i++) {
993 if (i == 0 || (i == 7 && mapped > 7)) {
995 * Unmap the skb->data area, or the
996 * external sglist (AKA the Outbound
997 * Address List (OAL)).
998 * If it's the zeroth element, then it's
999 * the skb->data area. If it's the 7th
1000 * element and there are more than 6 frags,
1001 * then it's the OAL.
1002 */
1004 QPRINTK(qdev, TX_DONE, DEBUG,
1005 "unmapping OAL area.\n");
1007 pci_unmap_single(qdev->pdev,
1008 pci_unmap_addr(&tx_ring_desc->map[i],
1010 pci_unmap_len(&tx_ring_desc->map[i],
1014 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1016 pci_unmap_page(qdev->pdev,
1017 pci_unmap_addr(&tx_ring_desc->map[i],
1019 pci_unmap_len(&tx_ring_desc->map[i],
1020 maplen), PCI_DMA_TODEVICE);
1026 /* Map the buffers for this transmit. This will return
1027 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1029 static int ql_map_send(struct ql_adapter *qdev,
1030 struct ob_mac_iocb_req *mac_iocb_ptr,
1031 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1033 int len = skb_headlen(skb);
1035 int frag_idx, err, map_idx = 0;
1036 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1037 int frag_cnt = skb_shinfo(skb)->nr_frags;
1040 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1043 * Map the skb buffer first.
1045 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1047 err = pci_dma_mapping_error(qdev->pdev, map);
1049 QPRINTK(qdev, TX_QUEUED, ERR,
1050 "PCI mapping failed with error: %d\n", err);
1052 return NETDEV_TX_BUSY;
1055 tbd->len = cpu_to_le32(len);
1056 tbd->addr = cpu_to_le64(map);
1057 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1058 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1062 * This loop fills the remainder of the 8 address descriptors
1063 * in the IOCB. If there are more than 7 fragments, then the
1064 * eighth address desc will point to an external list (OAL).
1065 * When this happens, the remainder of the frags will be stored
1068 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1069 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1071 if (frag_idx == 6 && frag_cnt > 7) {
1072 /* Let's tack on an sglist.
1073 * Our control block will now
1075 * iocb->seg[0] = skb->data
1076 * iocb->seg[1] = frag[0]
1077 * iocb->seg[2] = frag[1]
1078 * iocb->seg[3] = frag[2]
1079 * iocb->seg[4] = frag[3]
1080 * iocb->seg[5] = frag[4]
1081 * iocb->seg[6] = frag[5]
1082 * iocb->seg[7] = ptr to OAL (external sglist)
1083 * oal->seg[0] = frag[6]
1084 * oal->seg[1] = frag[7]
1085 * oal->seg[2] = frag[8]
1086 * oal->seg[3] = frag[9]
1087 * oal->seg[4] = frag[10]
1090 /* Tack on the OAL in the eighth segment of IOCB. */
1091 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1094 err = pci_dma_mapping_error(qdev->pdev, map);
1096 QPRINTK(qdev, TX_QUEUED, ERR,
1097 "PCI mapping outbound address list with error: %d\n",
1102 tbd->addr = cpu_to_le64(map);
1104 * The length is the number of fragments
1105 * that remain to be mapped times the length
1106 * of our sglist (OAL).
1108 tbd->len =
1109 cpu_to_le32((sizeof(struct tx_buf_desc) *
1110 (frag_cnt - frag_idx)) | TX_DESC_C);
1111 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1113 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1114 sizeof(struct oal));
1115 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1119 map =
1120 pci_map_page(qdev->pdev, frag->page,
1121 frag->page_offset, frag->size,
1122 PCI_DMA_TODEVICE);
1124 err = pci_dma_mapping_error(qdev->pdev, map);
1126 QPRINTK(qdev, TX_QUEUED, ERR,
1127 "PCI mapping frags failed with error: %d.\n",
1132 tbd->addr = cpu_to_le64(map);
1133 tbd->len = cpu_to_le32(frag->size);
1134 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1135 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1139 /* Save the number of segments we've mapped. */
1140 tx_ring_desc->map_cnt = map_idx;
1141 /* Terminate the last segment. */
1142 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1143 return NETDEV_TX_OK;
1147 * If the first frag mapping failed, then i will be zero.
1148 * This causes the unmap of the skb->data area. Otherwise
1149 * we pass in the number of frags that mapped successfully
1150 * so they can be unmapped.
1152 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1153 return NETDEV_TX_BUSY;
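
/* Worked example (illustrative; mirrors the layout comment above): an skb
 * with 10 fragments uses seg[0] for skb->data, seg[1..6] for frags 0..5,
 * and seg[7] to point at the OAL, which then carries frags 6..9. The OAL
 * descriptor's length field encodes the remaining entry count:
 */
static u32 ql_example_oal_len(int frag_cnt, int frag_idx)
{
	/* Same expression ql_map_send() stores, continuation bit included. */
	return (sizeof(struct tx_buf_desc) * (frag_cnt - frag_idx)) |
	       TX_DESC_C;
}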
1156 void ql_realign_skb(struct sk_buff *skb, int len)
1158 void *temp_addr = skb->data;
1160 /* Undo the skb_reserve(skb,32) we did before
1161 * giving to hardware, and realign data on
1162 * a 2-byte boundary.
1164 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1165 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1166 skb_copy_to_linear_data(skb, temp_addr, len);
1171 * This function builds an skb for the given inbound
1172 * completion. It will be rewritten for readability in the near
1173 * future, but for now it works well.
1175 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1176 struct rx_ring *rx_ring,
1177 struct ib_mac_iocb_rsp *ib_mac_rsp)
1179 struct bq_desc *lbq_desc;
1180 struct bq_desc *sbq_desc;
1181 struct sk_buff *skb = NULL;
1182 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1183 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1186 * Handle the header buffer if present.
1188 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1189 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1190 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1192 * Headers fit nicely into a small buffer.
1194 sbq_desc = ql_get_curr_sbuf(rx_ring);
1195 pci_unmap_single(qdev->pdev,
1196 pci_unmap_addr(sbq_desc, mapaddr),
1197 pci_unmap_len(sbq_desc, maplen),
1198 PCI_DMA_FROMDEVICE);
1199 skb = sbq_desc->p.skb;
1200 ql_realign_skb(skb, hdr_len);
1201 skb_put(skb, hdr_len);
1202 sbq_desc->p.skb = NULL;
1206 * Handle the data buffer(s).
1208 if (unlikely(!length)) { /* Is there data too? */
1209 QPRINTK(qdev, RX_STATUS, DEBUG,
1210 "No Data buffer in this packet.\n");
1214 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1215 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1216 QPRINTK(qdev, RX_STATUS, DEBUG,
1217 "Headers in small, data of %d bytes in small, combine them.\n", length);
1219 * Data is less than small buffer size so it's
1220 * stuffed in a small buffer.
1221 * For this case we append the data
1222 * from the "data" small buffer to the "header" small
1225 sbq_desc = ql_get_curr_sbuf(rx_ring);
1226 pci_dma_sync_single_for_cpu(qdev->pdev,
1228 (sbq_desc, mapaddr),
1231 PCI_DMA_FROMDEVICE);
1232 memcpy(skb_put(skb, length),
1233 sbq_desc->p.skb->data, length);
1234 pci_dma_sync_single_for_device(qdev->pdev,
1241 PCI_DMA_FROMDEVICE);
1243 QPRINTK(qdev, RX_STATUS, DEBUG,
1244 "%d bytes in a single small buffer.\n", length);
1245 sbq_desc = ql_get_curr_sbuf(rx_ring);
1246 skb = sbq_desc->p.skb;
1247 ql_realign_skb(skb, length);
1248 skb_put(skb, length);
1249 pci_unmap_single(qdev->pdev,
1250 pci_unmap_addr(sbq_desc,
1252 pci_unmap_len(sbq_desc,
1254 PCI_DMA_FROMDEVICE);
1255 sbq_desc->p.skb = NULL;
1257 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1258 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1259 QPRINTK(qdev, RX_STATUS, DEBUG,
1260 "Header in small, %d bytes in large. Chain large to small!\n", length);
1262 * The data is in a single large buffer. We
1263 * chain it to the header buffer's skb and let
1266 lbq_desc = ql_get_curr_lbuf(rx_ring);
1267 pci_unmap_page(qdev->pdev,
1268 pci_unmap_addr(lbq_desc,
1270 pci_unmap_len(lbq_desc, maplen),
1271 PCI_DMA_FROMDEVICE);
1272 QPRINTK(qdev, RX_STATUS, DEBUG,
1273 "Chaining page to skb.\n");
1274 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1277 skb->data_len += length;
1278 skb->truesize += length;
1279 lbq_desc->p.lbq_page = NULL;
1282 * The headers and data are in a single large buffer. We
1283 * copy it to a new skb and let it go. This can happen with
1284 * jumbo mtu on a non-TCP/UDP frame.
1286 lbq_desc = ql_get_curr_lbuf(rx_ring);
1287 skb = netdev_alloc_skb(qdev->ndev, length);
1289 QPRINTK(qdev, PROBE, DEBUG,
1290 "No skb available, drop the packet.\n");
1293 skb_reserve(skb, NET_IP_ALIGN);
1294 QPRINTK(qdev, RX_STATUS, DEBUG,
1295 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1296 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1299 skb->data_len += length;
1300 skb->truesize += length;
1302 lbq_desc->p.lbq_page = NULL;
1303 __pskb_pull_tail(skb,
1304 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1305 VLAN_ETH_HLEN : ETH_HLEN);
1309 * The data is in a chain of large buffers
1310 * pointed to by a small buffer. We loop
1311 * through and chain them to our small header
1313 * frags: There are 18 max frags and our small
1314 * buffer will hold 32 of them. The thing is,
1315 * we'll use 3 max for our 9000 byte jumbo
1316 * frames. If the MTU goes up we could
1317 * eventually be in trouble.
1319 int size, offset, i = 0;
1320 struct bq_element *bq, bq_array[8];
1321 sbq_desc = ql_get_curr_sbuf(rx_ring);
1322 pci_unmap_single(qdev->pdev,
1323 pci_unmap_addr(sbq_desc, mapaddr),
1324 pci_unmap_len(sbq_desc, maplen),
1325 PCI_DMA_FROMDEVICE);
1326 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1328 * This is a non-TCP/UDP IP frame, so
1329 * the headers aren't split into a small
1330 * buffer. We have to use the small buffer
1331 * that contains our sg list as our skb to
1332 * send upstairs. Copy the sg list here to
1333 * a local buffer and use it to find the
1336 QPRINTK(qdev, RX_STATUS, DEBUG,
1337 "%d bytes of headers & data in chain of large.\n", length);
1338 skb = sbq_desc->p.skb;
1339 bq = &bq_array[0];
1340 memcpy(bq, skb->data, sizeof(bq_array));
1341 sbq_desc->p.skb = NULL;
1342 skb_reserve(skb, NET_IP_ALIGN);
1344 QPRINTK(qdev, RX_STATUS, DEBUG,
1345 "Headers in small, %d bytes of data in chain of large.\n", length);
1346 bq = (struct bq_element *)sbq_desc->p.skb->data;
1348 while (length > 0) {
1349 lbq_desc = ql_get_curr_lbuf(rx_ring);
1350 if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
1351 QPRINTK(qdev, RX_STATUS, ERR,
1352 "Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
1353 lbq_desc->bq->addr_lo, bq->addr_lo);
1356 pci_unmap_page(qdev->pdev,
1357 pci_unmap_addr(lbq_desc,
1359 pci_unmap_len(lbq_desc,
1361 PCI_DMA_FROMDEVICE);
1362 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1365 QPRINTK(qdev, RX_STATUS, DEBUG,
1366 "Adding page %d to skb for %d bytes.\n",
1368 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1371 skb->data_len += size;
1372 skb->truesize += size;
1374 lbq_desc->p.lbq_page = NULL;
1378 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1379 VLAN_ETH_HLEN : ETH_HLEN);
1384 /* Process an inbound completion from an rx ring. */
1385 static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1386 struct rx_ring *rx_ring,
1387 struct ib_mac_iocb_rsp *ib_mac_rsp)
1389 struct net_device *ndev = qdev->ndev;
1390 struct sk_buff *skb = NULL;
1392 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1394 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1395 if (unlikely(!skb)) {
1396 QPRINTK(qdev, RX_STATUS, DEBUG,
1397 "No skb available, drop packet.\n");
1401 prefetch(skb->data);
1403 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1404 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1405 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1406 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1407 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1408 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1409 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1410 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1412 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1413 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1415 if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
1416 QPRINTK(qdev, RX_STATUS, ERR,
1417 "Bad checksum for this %s packet.\n",
1419 flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
1420 skb->ip_summed = CHECKSUM_NONE;
1421 } else if (qdev->rx_csum &&
1422 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
1423 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1424 !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
1425 QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
1426 skb->ip_summed = CHECKSUM_UNNECESSARY;
1428 qdev->stats.rx_packets++;
1429 qdev->stats.rx_bytes += skb->len;
1430 skb->protocol = eth_type_trans(skb, ndev);
1431 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1432 QPRINTK(qdev, RX_STATUS, DEBUG,
1433 "Passing a VLAN packet upstream.\n");
1434 vlan_hwaccel_rx(skb, qdev->vlgrp,
1435 le16_to_cpu(ib_mac_rsp->vlan_id));
1437 QPRINTK(qdev, RX_STATUS, DEBUG,
1438 "Passing a normal packet upstream.\n");
1441 ndev->last_rx = jiffies;
1444 /* Process an outbound completion from an rx ring. */
1445 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1446 struct ob_mac_iocb_rsp *mac_rsp)
1448 struct tx_ring *tx_ring;
1449 struct tx_ring_desc *tx_ring_desc;
1451 QL_DUMP_OB_MAC_RSP(mac_rsp);
1452 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1453 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1454 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1455 qdev->stats.tx_bytes += tx_ring_desc->skb->len;
1456 qdev->stats.tx_packets++;
1457 dev_kfree_skb(tx_ring_desc->skb);
1458 tx_ring_desc->skb = NULL;
1460 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1463 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1464 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1465 QPRINTK(qdev, TX_DONE, WARNING,
1466 "Total descriptor length did not match transfer length.\n");
1468 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1469 QPRINTK(qdev, TX_DONE, WARNING,
1470 "Frame too short to be legal, not sent.\n");
1472 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1473 QPRINTK(qdev, TX_DONE, WARNING,
1474 "Frame too long, but sent anyway.\n");
1476 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1477 QPRINTK(qdev, TX_DONE, WARNING,
1478 "PCI backplane error. Frame not sent.\n");
1481 atomic_inc(&tx_ring->tx_count);
1484 /* Fire up a handler to reset the MPI processor. */
1485 void ql_queue_fw_error(struct ql_adapter *qdev)
1487 netif_stop_queue(qdev->ndev);
1488 netif_carrier_off(qdev->ndev);
1489 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1492 void ql_queue_asic_error(struct ql_adapter *qdev)
1494 netif_stop_queue(qdev->ndev);
1495 netif_carrier_off(qdev->ndev);
1496 ql_disable_interrupts(qdev);
1497 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1500 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1501 struct ib_ae_iocb_rsp *ib_ae_rsp)
1503 switch (ib_ae_rsp->event) {
1504 case MGMT_ERR_EVENT:
1505 QPRINTK(qdev, RX_ERR, ERR,
1506 "Management Processor Fatal Error.\n");
1507 ql_queue_fw_error(qdev);
1510 case CAM_LOOKUP_ERR_EVENT:
1511 QPRINTK(qdev, LINK, ERR,
1512 "Multiple CAM hits lookup occurred.\n");
1513 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1514 ql_queue_asic_error(qdev);
1517 case SOFT_ECC_ERROR_EVENT:
1518 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1519 ql_queue_asic_error(qdev);
1522 case PCI_ERR_ANON_BUF_RD:
1523 QPRINTK(qdev, RX_ERR, ERR,
1524 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1526 ql_queue_asic_error(qdev);
1530 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1532 ql_queue_asic_error(qdev);
1537 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1539 struct ql_adapter *qdev = rx_ring->qdev;
1540 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1541 struct ob_mac_iocb_rsp *net_rsp = NULL;
1544 /* While there are entries in the completion queue. */
1545 while (prod != rx_ring->cnsmr_idx) {
1547 QPRINTK(qdev, RX_STATUS, DEBUG,
1548 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1549 prod, rx_ring->cnsmr_idx);
1551 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1553 switch (net_rsp->opcode) {
1555 case OPCODE_OB_MAC_TSO_IOCB:
1556 case OPCODE_OB_MAC_IOCB:
1557 ql_process_mac_tx_intr(qdev, net_rsp);
1560 QPRINTK(qdev, RX_STATUS, DEBUG,
1561 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1565 ql_update_cq(rx_ring);
1566 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1568 ql_write_cq_idx(rx_ring);
1569 if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
1570 struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1571 if (atomic_read(&tx_ring->queue_stopped) &&
1572 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1574 * The queue got stopped because the tx_ring was full.
1575 * Wake it up, because it's now at least 25% empty.
1577 netif_wake_queue(qdev->ndev);
1583 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1585 struct ql_adapter *qdev = rx_ring->qdev;
1586 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1587 struct ql_net_rsp_iocb *net_rsp;
1590 /* While there are entries in the completion queue. */
1591 while (prod != rx_ring->cnsmr_idx) {
1593 QPRINTK(qdev, RX_STATUS, DEBUG,
1594 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1595 prod, rx_ring->cnsmr_idx);
1597 net_rsp = rx_ring->curr_entry;
1599 switch (net_rsp->opcode) {
1600 case OPCODE_IB_MAC_IOCB:
1601 ql_process_mac_rx_intr(qdev, rx_ring,
1602 (struct ib_mac_iocb_rsp *)
1606 case OPCODE_IB_AE_IOCB:
1607 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1612 QPRINTK(qdev, RX_STATUS, DEBUG,
1613 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1618 ql_update_cq(rx_ring);
1619 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1620 if (count == budget)
1623 ql_update_buffer_queues(qdev, rx_ring);
1624 ql_write_cq_idx(rx_ring);
1628 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1630 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1631 struct ql_adapter *qdev = rx_ring->qdev;
1632 int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1634 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1637 if (work_done < budget) {
1638 __netif_rx_complete(qdev->ndev, napi);
1639 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1644 static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1646 struct ql_adapter *qdev = netdev_priv(ndev);
1650 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1651 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1652 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1654 QPRINTK(qdev, IFUP, DEBUG,
1655 "Turning off VLAN in NIC_RCV_CFG.\n");
1656 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1660 static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1662 struct ql_adapter *qdev = netdev_priv(ndev);
1663 u32 enable_bit = MAC_ADDR_E;
1665 spin_lock(&qdev->hw_lock);
1666 if (ql_set_mac_addr_reg
1667 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1668 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1670 spin_unlock(&qdev->hw_lock);
1673 static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1675 struct ql_adapter *qdev = netdev_priv(ndev);
1678 spin_lock(&qdev->hw_lock);
1679 if (ql_set_mac_addr_reg
1680 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1681 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1683 spin_unlock(&qdev->hw_lock);
1687 /* Worker thread to process a given rx_ring that is dedicated
1688 * to outbound completions.
1690 static void ql_tx_clean(struct work_struct *work)
1692 struct rx_ring *rx_ring =
1693 container_of(work, struct rx_ring, rx_work.work);
1694 ql_clean_outbound_rx_ring(rx_ring);
1695 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1699 /* Worker thread to process a given rx_ring that is dedicated
1700 * to inbound completions.
1702 static void ql_rx_clean(struct work_struct *work)
1704 struct rx_ring *rx_ring =
1705 container_of(work, struct rx_ring, rx_work.work);
1706 ql_clean_inbound_rx_ring(rx_ring, 64);
1707 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1710 /* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1711 static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1713 struct rx_ring *rx_ring = dev_id;
1714 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1715 &rx_ring->rx_work, 0);
1719 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1720 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1722 struct rx_ring *rx_ring = dev_id;
1723 struct ql_adapter *qdev = rx_ring->qdev;
1724 netif_rx_schedule(qdev->ndev, &rx_ring->napi);
1728 /* We check here to see if we're already handling a legacy
1729 * interrupt. If we are, then it must belong to another
1730 * chip with which we're sharing the interrupt line.
1732 int ql_legacy_check(struct ql_adapter *qdev)
1735 spin_lock(&qdev->legacy_lock);
1736 err = atomic_read(&qdev->intr_context[0].irq_cnt);
1737 spin_unlock(&qdev->legacy_lock);
1741 /* This handles a fatal error, MPI activity, and the default
1742 * rx_ring in an MSI-X multiple vector environment.
1743 * In MSI/Legacy environment it also processes the rest of
1746 static irqreturn_t qlge_isr(int irq, void *dev_id)
1748 struct rx_ring *rx_ring = dev_id;
1749 struct ql_adapter *qdev = rx_ring->qdev;
1750 struct intr_context *intr_context = &qdev->intr_context[0];
1755 if (qdev->legacy_check && qdev->legacy_check(qdev)) {
1756 QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
1757 return IRQ_NONE; /* Not our interrupt */
1760 var = ql_read32(qdev, STS);
1763 * Check for fatal error.
1766 ql_queue_asic_error(qdev);
1767 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1768 var = ql_read32(qdev, ERR_STS);
1769 QPRINTK(qdev, INTR, ERR,
1770 "Resetting chip. Error Status Register = 0x%x\n", var);
1775 * Check MPI processor activity.
1779 * We've got an async event or mailbox completion.
1780 * Handle it and clear the source of the interrupt.
1782 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1783 ql_disable_completion_interrupt(qdev, intr_context->intr);
1784 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1785 &qdev->mpi_work, 0);
1790 * Check the default queue and wake handler if active.
1792 rx_ring = &qdev->rx_ring[0];
1793 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1794 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1795 ql_disable_completion_interrupt(qdev, intr_context->intr);
1796 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1797 &rx_ring->rx_work, 0);
1801 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1803 * Start the DPC for each active queue.
1805 for (i = 1; i < qdev->rx_ring_count; i++) {
1806 rx_ring = &qdev->rx_ring[i];
1807 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1808 rx_ring->cnsmr_idx) {
1809 QPRINTK(qdev, INTR, INFO,
1810 "Waking handler for rx_ring[%d].\n", i);
1811 ql_disable_completion_interrupt(qdev,
1814 if (i < qdev->rss_ring_first_cq_id)
1815 queue_delayed_work_on(rx_ring->cpu,
1820 netif_rx_schedule(qdev->ndev,
1826 return work_done ? IRQ_HANDLED : IRQ_NONE;
1829 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1832 if (skb_is_gso(skb)) {
1834 if (skb_header_cloned(skb)) {
1835 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1840 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1841 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1842 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1843 mac_iocb_ptr->total_hdrs_len =
1844 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1845 mac_iocb_ptr->net_trans_offset =
1846 cpu_to_le16(skb_network_offset(skb) |
1847 skb_transport_offset(skb)
1848 << OB_MAC_TRANSPORT_HDR_SHIFT);
1849 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1850 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1851 if (likely(skb->protocol == htons(ETH_P_IP))) {
1852 struct iphdr *iph = ip_hdr(skb);
1854 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1855 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1859 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1860 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1861 tcp_hdr(skb)->check =
1862 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1863 &ipv6_hdr(skb)->daddr,
1871 static void ql_hw_csum_setup(struct sk_buff *skb,
1872 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1875 struct iphdr *iph = ip_hdr(skb);
1877 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1878 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1879 mac_iocb_ptr->net_trans_offset =
1880 cpu_to_le16(skb_network_offset(skb) |
1881 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1883 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1884 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1885 if (likely(iph->protocol == IPPROTO_TCP)) {
1886 check = &(tcp_hdr(skb)->check);
1887 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1888 mac_iocb_ptr->total_hdrs_len =
1889 cpu_to_le16(skb_transport_offset(skb) +
1890 (tcp_hdr(skb)->doff << 2));
1892 check = &(udp_hdr(skb)->check);
1893 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1894 mac_iocb_ptr->total_hdrs_len =
1895 cpu_to_le16(skb_transport_offset(skb) +
1896 sizeof(struct udphdr));
1898 *check = ~csum_tcpudp_magic(iph->saddr,
1899 iph->daddr, len, iph->protocol, 0);
1902 static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1904 struct tx_ring_desc *tx_ring_desc;
1905 struct ob_mac_iocb_req *mac_iocb_ptr;
1906 struct ql_adapter *qdev = netdev_priv(ndev);
1908 struct tx_ring *tx_ring;
1909 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1911 tx_ring = &qdev->tx_ring[tx_ring_idx];
1913 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1914 QPRINTK(qdev, TX_QUEUED, INFO,
1915 "%s: shutting down tx queue %d du to lack of resources.\n",
1916 __func__, tx_ring_idx);
1917 netif_stop_queue(ndev);
1918 atomic_inc(&tx_ring->queue_stopped);
1919 return NETDEV_TX_BUSY;
1921 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1922 mac_iocb_ptr = tx_ring_desc->queue_entry;
1923 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
1924 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1925 QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1926 return NETDEV_TX_BUSY;
1929 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1930 mac_iocb_ptr->tid = tx_ring_desc->index;
1931 /* We use the upper 32-bits to store the tx queue for this IO.
1932 * When we get the completion we can use it to establish the context.
1934 mac_iocb_ptr->txq_idx = tx_ring_idx;
1935 tx_ring_desc->skb = skb;
1937 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1939 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1940 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1941 vlan_tx_tag_get(skb));
1942 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1943 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1945 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1947 dev_kfree_skb_any(skb);
1948 return NETDEV_TX_OK;
1949 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1950 ql_hw_csum_setup(skb,
1951 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1953 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1954 tx_ring->prod_idx++;
1955 if (tx_ring->prod_idx == tx_ring->wq_len)
1956 tx_ring->prod_idx = 0;
1959 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1960 ndev->trans_start = jiffies;
1961 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1962 tx_ring->prod_idx, skb->len);
1964 atomic_dec(&tx_ring->tx_count);
1965 return NETDEV_TX_OK;
1968 static void ql_free_shadow_space(struct ql_adapter *qdev)
1970 if (qdev->rx_ring_shadow_reg_area) {
1971 pci_free_consistent(qdev->pdev,
1973 qdev->rx_ring_shadow_reg_area,
1974 qdev->rx_ring_shadow_reg_dma);
1975 qdev->rx_ring_shadow_reg_area = NULL;
1977 if (qdev->tx_ring_shadow_reg_area) {
1978 pci_free_consistent(qdev->pdev,
1980 qdev->tx_ring_shadow_reg_area,
1981 qdev->tx_ring_shadow_reg_dma);
1982 qdev->tx_ring_shadow_reg_area = NULL;
1986 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
1988 qdev->rx_ring_shadow_reg_area =
1989 pci_alloc_consistent(qdev->pdev,
1990 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
1991 if (qdev->rx_ring_shadow_reg_area == NULL) {
1992 QPRINTK(qdev, IFUP, ERR,
1993 "Allocation of RX shadow space failed.\n");
1996 qdev->tx_ring_shadow_reg_area =
1997 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
1998 &qdev->tx_ring_shadow_reg_dma);
1999 if (qdev->tx_ring_shadow_reg_area == NULL) {
2000 QPRINTK(qdev, IFUP, ERR,
2001 "Allocation of TX shadow space failed.\n");
2002 goto err_wqp_sh_area;
2007 pci_free_consistent(qdev->pdev,
2009 qdev->rx_ring_shadow_reg_area,
2010 qdev->rx_ring_shadow_reg_dma);
2014 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2016 struct tx_ring_desc *tx_ring_desc;
2018 struct ob_mac_iocb_req *mac_iocb_ptr;
2020 mac_iocb_ptr = tx_ring->wq_base;
2021 tx_ring_desc = tx_ring->q;
2022 for (i = 0; i < tx_ring->wq_len; i++) {
2023 tx_ring_desc->index = i;
2024 tx_ring_desc->skb = NULL;
2025 tx_ring_desc->queue_entry = mac_iocb_ptr;
2029 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2030 atomic_set(&tx_ring->queue_stopped, 0);
2033 static void ql_free_tx_resources(struct ql_adapter *qdev,
2034 struct tx_ring *tx_ring)
2036 if (tx_ring->wq_base) {
2037 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2038 tx_ring->wq_base, tx_ring->wq_base_dma);
2039 tx_ring->wq_base = NULL;
2045 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2046 struct tx_ring *tx_ring)
2048 tx_ring->wq_base =
2049 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2050 &tx_ring->wq_base_dma);
2052 if ((tx_ring->wq_base == NULL)
2053 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2054 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2057 tx_ring->q =
2058 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2059 if (tx_ring->q == NULL)
2064 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2065 tx_ring->wq_base, tx_ring->wq_base_dma);
2069 void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2072 struct bq_desc *lbq_desc;
2074 for (i = 0; i < rx_ring->lbq_len; i++) {
2075 lbq_desc = &rx_ring->lbq[i];
2076 if (lbq_desc->p.lbq_page) {
2077 pci_unmap_page(qdev->pdev,
2078 pci_unmap_addr(lbq_desc, mapaddr),
2079 pci_unmap_len(lbq_desc, maplen),
2080 PCI_DMA_FROMDEVICE);
2082 put_page(lbq_desc->p.lbq_page);
2083 lbq_desc->p.lbq_page = NULL;
2085 lbq_desc->bq->addr_lo = 0;
2086 lbq_desc->bq->addr_hi = 0;
2091 * Allocate and map a page for each element of the lbq.
2093 static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2094 struct rx_ring *rx_ring)
2097 struct bq_desc *lbq_desc;
2099 struct bq_element *bq = rx_ring->lbq_base;
2101 for (i = 0; i < rx_ring->lbq_len; i++) {
2102 lbq_desc = &rx_ring->lbq[i];
2103 memset(lbq_desc, 0, sizeof(*lbq_desc));
2105 lbq_desc->index = i;
2106 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2107 if (unlikely(!lbq_desc->p.lbq_page)) {
2108 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2111 map = pci_map_page(qdev->pdev,
2112 lbq_desc->p.lbq_page,
2113 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2114 if (pci_dma_mapping_error(qdev->pdev, map)) {
2115 QPRINTK(qdev, IFUP, ERR,
2116 "PCI mapping failed.\n");
2119 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2120 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2121 bq->addr_lo = cpu_to_le32(map);
2122 bq->addr_hi = cpu_to_le32(map >> 32);
2128 ql_free_lbq_buffers(qdev, rx_ring);
2132 void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2135 struct bq_desc *sbq_desc;
2137 for (i = 0; i < rx_ring->sbq_len; i++) {
2138 sbq_desc = &rx_ring->sbq[i];
2139 if (sbq_desc == NULL) {
2140 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2143 if (sbq_desc->p.skb) {
2144 pci_unmap_single(qdev->pdev,
2145 pci_unmap_addr(sbq_desc, mapaddr),
2146 pci_unmap_len(sbq_desc, maplen),
2147 PCI_DMA_FROMDEVICE);
2148 dev_kfree_skb(sbq_desc->p.skb);
2149 sbq_desc->p.skb = NULL;
2151 if (sbq_desc->bq == NULL) {
2152 QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
2156 sbq_desc->bq->addr_lo = 0;
2157 sbq_desc->bq->addr_hi = 0;
2161 /* Allocate and map an skb for each element of the sbq. */
2162 static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2163 struct rx_ring *rx_ring)
2166 struct bq_desc *sbq_desc;
2167 struct sk_buff *skb;
2169 struct bq_element *bq = rx_ring->sbq_base;
2171 for (i = 0; i < rx_ring->sbq_len; i++) {
2172 sbq_desc = &rx_ring->sbq[i];
2173 memset(sbq_desc, 0, sizeof(*sbq_desc));
2174 sbq_desc->index = i;
2176 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2177 if (unlikely(!skb)) {
2178 /* Better luck next round */
2179 QPRINTK(qdev, IFUP, ERR,
2180 "small buff alloc failed for %d bytes at index %d.\n",
2181 rx_ring->sbq_buf_size, i);
2184 skb_reserve(skb, QLGE_SB_PAD);
2185 sbq_desc->p.skb = skb;
2187 * Map only half the buffer, because the
2188 * other half may get some data copied to it
2189 * when the completion arrives.
2191 map = pci_map_single(qdev->pdev,
2193 rx_ring->sbq_buf_size / 2,
2194 PCI_DMA_FROMDEVICE);
2195 if (pci_dma_mapping_error(qdev->pdev, map)) {
2196 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2199 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2200 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2201 bq->addr_lo = cpu_to_le32(map);
2203 bq->addr_hi = cpu_to_le32(map >> 32);
2209 ql_free_sbq_buffers(qdev, rx_ring);
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	if (rx_ring->sbq_len)
		ql_free_sbq_buffers(qdev, rx_ring);
	if (rx_ring->lbq_len)
		ql_free_lbq_buffers(qdev, rx_ring);

	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);
	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);
		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}
		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer allocation failed.\n");
			goto err_mem;
		}
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);
		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate the buffers.
		 */
		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;
err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
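/* Reclaim any skbs still hanging on the tx rings at shutdown
 * time.  These are sends the hardware never completed, so they
 * must be unmapped and freed here or they would leak.
 */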
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}
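/* The rx_ring and tx_ring control blocks live in one contiguous
 * kmalloc'd area (see ql_alloc_ring_cb() below): the rx_ring array
 * comes first, followed by the tx_ring array.
 */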
static void ql_free_ring_cb(struct ql_adapter *qdev)
{
	kfree(qdev->ring_mem);
}

static int ql_alloc_ring_cb(struct ql_adapter *qdev)
{
	/* Allocate space for tx/rx ring control blocks. */
	qdev->ring_mem_size =
	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
	    (qdev->rx_ring_count * sizeof(struct rx_ring));
	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
	if (qdev->ring_mem == NULL) {
		return -ENOMEM;
	} else {
		qdev->rx_ring = qdev->ring_mem;
		qdev->tx_ring = qdev->ring_mem +
		    (qdev->rx_ring_count * sizeof(struct rx_ring));
	}
	return 0;
}
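/* Free/allocate all per-ring DMA resources.  Allocation is
 * all-or-nothing: any failure unwinds everything allocated so far.
 */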
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;
err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);

	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
	cqicb->prod_idx_addr_hi =
	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
		cqicb->lbq_addr_lo =
		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
		cqicb->lbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
		bq_len = (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
		rx_ring->lbq_free_cnt = 16;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
		cqicb->sbq_addr_lo =
		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
		cqicb->sbq_buf_size =
		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
		bq_len = (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
		rx_ring->sbq_free_cnt = 16;
	}
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings.  We do this so
		 * they can be run on multiple CPUs.  There is
		 * room to tune this further: run in a worker only
		 * when there are more than some threshold of
		 * outbound completions on the queue and more
		 * than one queue active, i.e. when the benefit
		 * outweighs the cost of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
	/*
	 * Advance the producer index for the buffer queues.
	 */
	wmb();
	if (rx_ring->lbq_len)
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	if (rx_ring->sbq_len)
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	return err;
}
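/* Set up the tx ring control block and download it to the chip.
 * The wqicb is the work queue analog of the cqicb above (a "Work
 * Queue Initialization Control Block").
 */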
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);

	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
	wqicb->cnsmr_idx_addr_hi =
	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(struct wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
	return err;
}
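/* Release whatever interrupt mode ql_enable_msix() settled on. */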
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
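/* Pick an interrupt delivery mode.  Try MSI-X first (one vector
 * per rx_ring), fall back to MSI, and finally to legacy INTx if
 * neither can be enabled.
 */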
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i;

	qdev->intr_count = 1;
	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->msi_x_entry[i].entry = i;

		if (!pci_enable_msix
		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			qdev->intr_count = qdev->rx_ring_count;
			QPRINTK(qdev, IFUP, INFO,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		} else {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			irq_type = MSI_IRQ;
		}
	}
msi:
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	irq_type = LEG_IRQ;
	spin_lock_init(&qdev->legacy_lock);
	qdev->legacy_check = ql_legacy_check;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_enable_msix(qdev);

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 * This is only true when MSI-X is enabled.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;

			if (i == 0) {
				/*
				 * Default queue handles bcast/mcast plus
				 * async events.  Needs buffers.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-default-queue",
					qdev->ndev->name);
			} else if (i < qdev->rss_ring_first_cq_id) {
				/*
				 * Outbound queue is for outbound completions only.
				 */
				intr_context->handler = qlge_msix_tx_isr;
				sprintf(intr_context->name, "%s-txq-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rxq-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				QPRINTK(qdev, IFDOWN, ERR,
					"freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				QPRINTK(qdev, IFDOWN, ERR,
					"freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				QPRINTK(qdev, IFUP, ERR,
					"Failed request for MSIX interrupt %d.\n",
					i);
				goto err_irq;
			} else {
				QPRINTK(qdev, IFUP, INFO,
					"Hooked intr %d, queue type %s%s%s, with name %s.\n",
					i,
					qdev->rx_ring[i].type ==
					DEFAULT_Q ? "DEFAULT_Q" : "",
					qdev->rx_ring[i].type ==
					TX_Q ? "TX_Q" : "",
					qdev->rx_ring[i].type ==
					RX_Q ? "RX_Q" : "", intr_context->name);
			}
		} else {
			QPRINTK(qdev, IFUP, DEBUG,
				"trying msi or legacy interrupts.\n");
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: irq = %d.\n", __func__, pdev->irq);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: context->name = %s.\n", __func__,
				intr_context->name);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: dev_id = 0x%p.\n", __func__,
				&qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			QPRINTK(qdev, IFUP, ERR,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[0].type ==
				DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
	ql_free_irq(qdev);
	return status;
}
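/* Build and download the RICB (RSS Initialization Control Block):
 * hash keys, indirection table, and the mask that spreads inbound
 * flows across the RSS completion queues.
 */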
static int ql_start_rss(struct ql_adapter *qdev)
{
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
	     RSS_RT6);
	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 32; i++)
		hash_id[i] = i & (qdev->rss_ring_count - 1);

	/*
	 * Random values for the IPv6 and IPv4 Hash Keys.
	 */
	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);

	QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
	return status;
}
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	int i;

	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		return status;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		return status;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
	return status;
}
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		   min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	status = ql_port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}
	return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int max_wait_time;
	int status = 0;
	int resetCnt = 0;

#define MAX_RESET_CNT 1
issue_reset:
	resetCnt++;
	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
	/* Wait for reset to complete. */
	max_wait_time = 3;
	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
		max_wait_time);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		ssleep(1);
	} while ((--max_wait_time));
	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"Stuck in SoftReset: FSC_SR:0x%08x\n", value);
		if (resetCnt < MAX_RESET_CNT)
			goto issue_reset;
	}
	if (max_wait_time == 0) {
		status = -ETIMEDOUT;
		QPRINTK(qdev, IFDOWN, ERR,
			"ETIMEDOUT: timed out waiting for chip reset.\n");
	}
	return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	QPRINTK(qdev, PROBE, INFO,
		"Function #%d, NIC Roll %d, NIC Rev = %d, "
		"XG Roll = %d, XG Rev = %d.\n",
		qdev->func,
		qdev->chip_rev_id & 0x0000000f,
		qdev->chip_rev_id >> 4 & 0x0000000f,
		qdev->chip_rev_id >> 8 & 0x0000000f,
		qdev->chip_rev_id >> 12 & 0x0000000f);
	QPRINTK(qdev, PROBE, INFO,
		"MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		ndev->dev_addr[0], ndev->dev_addr[1],
		ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
		ndev->dev_addr[5]);
}
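/* Stop the adapter: quiesce the stack and worker threads, disable
 * interrupts, reclaim pending tx buffers, and soft-reset the chip.
 */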
static int ql_adapter_down(struct ql_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int i, status = 0;
	struct rx_ring *rx_ring;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);

	/* The default queue at index 0 is always processed in
	 * a workqueue.
	 */
	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);

	/* The rest of the rx_rings are processed in
	 * a workqueue only if it's a single interrupt
	 * environment (MSI/Legacy).
	 */
	for (i = 1; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Only the RSS rings use NAPI on multi irq
		 * environment.  Outbound completion processing
		 * is done in interrupt context.
		 */
		if (i >= qdev->rss_ring_first_cq_id) {
			napi_disable(&rx_ring->napi);
		} else {
			cancel_delayed_work_sync(&rx_ring->rx_work);
		}
	}

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	spin_lock(&qdev->hw_lock);
	status = ql_adapter_reset(qdev);
	if (status)
		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
			qdev->func);
	spin_unlock(&qdev->hw_lock);
	return status;
}
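/* Bring the adapter up: initialize the hardware under hw_lock,
 * then enable interrupts and, if the port is initialized, the
 * netdev queue.  On failure the chip is reset back to a sane state.
 */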
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	spin_lock(&qdev->hw_lock);
	err = ql_adapter_initialize(qdev);
	if (err) {
		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
		spin_unlock(&qdev->hw_lock);
		goto err_init;
	}
	spin_unlock(&qdev->hw_lock);
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	if ((ql_read32(qdev, STS) & qdev->port_init)) {
		netif_carrier_on(qdev->ndev);
		netif_start_queue(qdev->ndev);
	}
	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}
static int ql_cycle_adapter(struct ql_adapter *qdev)
{
	int status;

	status = ql_adapter_down(qdev);
	if (status)
		goto error;
	status = ql_adapter_up(qdev);
	if (status)
		goto error;
	return status;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device\n");
	dev_close(qdev->ndev);
	return status;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	if (status)
		ql_free_mem_resources(qdev);
	return status;
}
static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	ql_free_ring_cb(qdev);
	return 0;
}
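/* Size and type every tx_ring and rx_ring based on the number of
 * online CPUs, then allocate the ring control block area.
 */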
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = num_online_cpus();

	/*
	 * For each processor present we allocate one
	 * rx_ring for outbound completions, and one
	 * rx_ring for inbound completions.  Plus there is
	 * always the one default queue.  For the CPU
	 * counts we end up with the following rx_rings:
	 * rx_ring count =
	 *  one default queue +
	 *  (CPU count * outbound completion rx_ring) +
	 *  (CPU count * inbound (RSS) completion rx_ring)
	 * To keep it simple we limit the total number of
	 * queues to < 32, so we truncate CPU to 8.
	 * This limitation can be removed when requested.
	 */
	if (cpu_cnt > 8)
		cpu_cnt = 8;

	/*
	 * rx_ring[0] is always the default queue.
	 */
	/* Allocate outbound completion ring for each CPU. */
	qdev->tx_ring_count = cpu_cnt;
	/* Allocate inbound completion (RSS) ring for each CPU. */
	qdev->rss_ring_count = cpu_cnt;
	/* cq_id for the first inbound ring handler. */
	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
	/*
	 * qdev->rx_ring_count:
	 * Total number of rx_rings.  This includes the one
	 * default queue, a number of outbound completion
	 * handler rx_rings, and the number of inbound
	 * completion handler rx_rings.
	 */
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;

	if (ql_alloc_ring_cb(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the default Q ID, which is zero.
		 */
		tx_ring->cq_id = i + 1;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i == 0) {	/* Default queue at index 0. */
			/*
			 * Default queue handles bcast/mcast plus
			 * async events.  Needs buffers.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(struct bq_element);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(struct bq_element);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = DEFAULT_Q;
		} else if (i < qdev->rss_ring_first_cq_id) {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		} else {	/* Inbound completions (RSS) queues */
			/*
			 * Inbound queues handle unicast frames only.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(struct bq_element);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(struct bq_element);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;
		}
	}
	return 0;
}
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	ql_free_ring_cb(qdev);
	return err;
}
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else {
		return -EINVAL;
	}
	ndev->mtu = new_mtu;
	return 0;
}
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i;

	spin_lock(&qdev->hw_lock);
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (ndev->mc_count) {
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to load multicast address.\n");
				goto exit;
			}
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	spin_unlock(&qdev->hw_lock);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
		spin_unlock(&qdev->hw_lock);
		return -1;
	}
	spin_unlock(&qdev->hw_lock);
	return 0;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	ql_cycle_adapter(qdev);
}
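/* Cache per-function constants (semaphore mask, link/init status
 * bits, mailbox addresses) that depend on which PCI function we are.
 */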
static void ql_get_board_info(struct ql_adapter *qdev)
{
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->q_workqueue) {
		destroy_workqueue(qdev->q_workqueue);
		qdev->q_workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
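/* First half of device bring-up at probe time: PCI setup, BAR
 * mappings, flash-derived MAC address, default tunables, and the
 * worker threads.  Resources are released via ql_release_all() on
 * any failure.
 */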
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int pos, err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		err = -ENODEV;
		goto err_out;
	} else {
		u16 val16;
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
		val16 |= (PCI_EXP_DEVCTL_CERE |
			  PCI_EXP_DEVCTL_NFERE |
			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	pci_set_drvdata(pdev, ndev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	ql_get_board_info(qdev);

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = ql_get_flash_params(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	if (!is_valid_ether_addr(qdev->flash.mac_addr))
		goto err_out;

	memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->q_workqueue = create_workqueue(ndev->name);
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev(sizeof(struct ql_adapter));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;
	ndev->open = qlge_open;
	ndev->stop = qlge_close;
	ndev->hard_start_xmit = qlge_send;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->change_mtu = qlge_change_mtu;
	ndev->get_stats = qlge_get_stats;
	ndev->set_multicast_list = qlge_set_multicast_list;
	ndev->set_mac_address = qlge_set_mac_address;
	ndev->tx_timeout = qlge_tx_timeout;
	ndev->watchdog_timeo = 10 * HZ;
	ndev->vlan_rx_register = ql_vlan_rx_register;
	ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
	ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pci_set_master(pdev);

	if (netif_running(ndev)) {
		if (ql_adapter_up(qdev)) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	}

	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);