2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
5 * See LICENSE.qlcnic for copyright and licensing details.
8 #include <linux/slab.h>
9 #include <linux/vmalloc.h>
10 #include <linux/interrupt.h>
14 #include <linux/swab.h>
15 #include <linux/dma-mapping.h>
17 #include <linux/ipv6.h>
18 #include <linux/inetdevice.h>
19 #include <linux/sysfs.h>
20 #include <linux/aer.h>
21 #include <linux/log2.h>
23 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
24 MODULE_LICENSE("GPL");
25 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
26 MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
28 char qlcnic_driver_name[] = "qlcnic";
29 static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
30 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
32 static struct workqueue_struct *qlcnic_wq;
33 static int qlcnic_mac_learn;
34 module_param(qlcnic_mac_learn, int, 0444);
35 MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");
37 static int use_msi = 1;
38 module_param(use_msi, int, 0444);
39 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
41 static int use_msi_x = 1;
42 module_param(use_msi_x, int, 0444);
43 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
45 static int auto_fw_reset = 1;
46 module_param(auto_fw_reset, int, 0644);
47 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
49 static int load_fw_file;
50 module_param(load_fw_file, int, 0444);
51 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
53 static int qlcnic_config_npars;
54 module_param(qlcnic_config_npars, int, 0444);
55 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
57 static int __devinit qlcnic_probe(struct pci_dev *pdev,
58 const struct pci_device_id *ent);
59 static void __devexit qlcnic_remove(struct pci_dev *pdev);
60 static int qlcnic_open(struct net_device *netdev);
61 static int qlcnic_close(struct net_device *netdev);
62 static void qlcnic_tx_timeout(struct net_device *netdev);
63 static void qlcnic_attach_work(struct work_struct *work);
64 static void qlcnic_fwinit_work(struct work_struct *work);
65 static void qlcnic_fw_poll_work(struct work_struct *work);
66 static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
67 work_func_t func, int delay);
68 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
69 static int qlcnic_poll(struct napi_struct *napi, int budget);
70 static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void qlcnic_poll_controller(struct net_device *netdev);
75 static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
76 static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
77 static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
78 static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
80 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
81 static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
82 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
84 static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
85 static irqreturn_t qlcnic_intr(int irq, void *data);
86 static irqreturn_t qlcnic_msi_intr(int irq, void *data);
87 static irqreturn_t qlcnic_msix_intr(int irq, void *data);
89 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
90 static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
91 static int qlcnic_start_firmware(struct qlcnic_adapter *);
93 static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
94 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
95 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
96 static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
97 static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
98 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
99 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
100 struct qlcnic_esw_func_cfg *);
101 static void qlcnic_vlan_rx_add(struct net_device *, u16);
102 static void qlcnic_vlan_rx_del(struct net_device *, u16);
104 /* PCI Device ID Table */
105 #define ENTRY(device) \
106 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
107 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
109 #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
111 static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
112 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
116 MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
120 qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
121 struct qlcnic_host_tx_ring *tx_ring)
123 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
126 static const u32 msi_tgt_status[8] = {
127 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
128 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
129 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
130 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
134 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
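/*
 * Per-SDS-ring interrupt mask helpers: writing 0 to the ring's CRB mask
 * register masks its interrupt and writing 1 unmasks it; in legacy INTx
 * (non-MSI) mode the global target mask register is rewritten as well so
 * the interrupt line can be reasserted.
 */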
136 static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
138 writel(0, sds_ring->crb_intr_mask);
141 static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
143 struct qlcnic_adapter *adapter = sds_ring->adapter;
145 writel(0x1, sds_ring->crb_intr_mask);
147 if (!QLCNIC_IS_MSI_FAMILY(adapter))
148 writel(0xfbff, adapter->tgt_mask_reg);
152 qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
154 int size = sizeof(struct qlcnic_host_sds_ring) * count;
156 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
158 return recv_ctx->sds_rings == NULL;
162 qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
164 if (recv_ctx->sds_rings != NULL)
165 kfree(recv_ctx->sds_rings);
167 recv_ctx->sds_rings = NULL;
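/*
 * One NAPI context is registered per SDS (status descriptor) ring. The last
 * ring uses qlcnic_poll, which also reaps TX completions, while the remaining
 * rings use qlcnic_rx_poll and handle RX work only.
 */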
171 qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
174 struct qlcnic_host_sds_ring *sds_ring;
175 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
177 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
180 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
181 sds_ring = &recv_ctx->sds_rings[ring];
183 if (ring == adapter->max_sds_rings - 1)
184 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
185 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
187 netif_napi_add(netdev, &sds_ring->napi,
188 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
195 qlcnic_napi_del(struct qlcnic_adapter *adapter)
198 struct qlcnic_host_sds_ring *sds_ring;
199 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
201 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
202 sds_ring = &recv_ctx->sds_rings[ring];
203 netif_napi_del(&sds_ring->napi);
206 qlcnic_free_sds_rings(adapter->recv_ctx);
210 qlcnic_napi_enable(struct qlcnic_adapter *adapter)
213 struct qlcnic_host_sds_ring *sds_ring;
214 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
216 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
219 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
220 sds_ring = &recv_ctx->sds_rings[ring];
221 napi_enable(&sds_ring->napi);
222 qlcnic_enable_int(sds_ring);
227 qlcnic_napi_disable(struct qlcnic_adapter *adapter)
230 struct qlcnic_host_sds_ring *sds_ring;
231 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
233 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
236 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
237 sds_ring = &recv_ctx->sds_rings[ring];
238 qlcnic_disable_int(sds_ring);
239 napi_synchronize(&sds_ring->napi);
240 napi_disable(&sds_ring->napi);
244 static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
246 memset(&adapter->stats, 0, sizeof(adapter->stats));
249 static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
254 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
256 pci_read_config_dword(pdev, pos, &control);
258 control |= PCI_MSIX_FLAGS_ENABLE;
261 pci_write_config_dword(pdev, pos, control);
265 static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
269 for (i = 0; i < count; i++)
270 adapter->msix_entries[i].entry = i;
274 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
276 u8 mac_addr[ETH_ALEN];
277 struct net_device *netdev = adapter->netdev;
278 struct pci_dev *pdev = adapter->pdev;
280 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
283 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
284 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
285 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
287 /* set station address */
289 if (!is_valid_ether_addr(netdev->perm_addr))
290 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
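/*
 * .ndo_set_mac_address handler: if the interface is up it is quiesced
 * (device detached, NAPI disabled) while the new address is programmed and
 * the multicast list is rewritten, then re-attached.
 */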
296 static int qlcnic_set_mac(struct net_device *netdev, void *p)
298 struct qlcnic_adapter *adapter = netdev_priv(netdev);
299 struct sockaddr *addr = p;
301 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
304 if (!is_valid_ether_addr(addr->sa_data))
307 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
308 netif_device_detach(netdev);
309 qlcnic_napi_disable(adapter);
312 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
313 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
314 qlcnic_set_multi(adapter->netdev);
316 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
317 netif_device_attach(netdev);
318 qlcnic_napi_enable(adapter);
323 static const struct net_device_ops qlcnic_netdev_ops = {
324 .ndo_open = qlcnic_open,
325 .ndo_stop = qlcnic_close,
326 .ndo_start_xmit = qlcnic_xmit_frame,
327 .ndo_get_stats = qlcnic_get_stats,
328 .ndo_validate_addr = eth_validate_addr,
329 .ndo_set_multicast_list = qlcnic_set_multi,
330 .ndo_set_mac_address = qlcnic_set_mac,
331 .ndo_change_mtu = qlcnic_change_mtu,
332 .ndo_fix_features = qlcnic_fix_features,
333 .ndo_set_features = qlcnic_set_features,
334 .ndo_tx_timeout = qlcnic_tx_timeout,
335 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
336 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
337 #ifdef CONFIG_NET_POLL_CONTROLLER
338 .ndo_poll_controller = qlcnic_poll_controller,
342 static struct qlcnic_nic_template qlcnic_ops = {
343 .config_bridged_mode = qlcnic_config_bridged_mode,
344 .config_led = qlcnic_config_led,
345 .start_firmware = qlcnic_start_firmware
348 static struct qlcnic_nic_template qlcnic_vf_ops = {
349 .config_bridged_mode = qlcnicvf_config_bridged_mode,
350 .config_led = qlcnicvf_config_led,
351 .start_firmware = qlcnicvf_start_firmware
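/*
 * Request num_msix MSI-X vectors. On success the adapter gets one SDS ring
 * per vector; if pci_enable_msix() reports that fewer vectors are available,
 * the vector count is rounded down to a power of two and the request is
 * retried.
 */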
354 static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
356 struct pci_dev *pdev = adapter->pdev;
359 adapter->max_sds_rings = 1;
360 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
361 qlcnic_set_msix_bit(pdev, 0);
363 if (adapter->msix_supported) {
365 qlcnic_init_msix_entries(adapter, num_msix);
366 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
368 adapter->flags |= QLCNIC_MSIX_ENABLED;
369 qlcnic_set_msix_bit(pdev, 1);
371 adapter->max_sds_rings = num_msix;
373 dev_info(&pdev->dev, "using msi-x interrupts\n");
377 num_msix = rounddown_pow_of_two(err);
386 static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
388 const struct qlcnic_legacy_intr_set *legacy_intrp;
389 struct pci_dev *pdev = adapter->pdev;
391 if (use_msi && !pci_enable_msi(pdev)) {
392 adapter->flags |= QLCNIC_MSI_ENABLED;
393 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
394 msi_tgt_status[adapter->ahw->pci_func]);
395 dev_info(&pdev->dev, "using msi interrupts\n");
396 adapter->msix_entries[0].vector = pdev->irq;
400 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
402 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
403 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
404 legacy_intrp->tgt_status_reg);
405 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
406 legacy_intrp->tgt_mask_reg);
407 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
409 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
411 dev_info(&pdev->dev, "using legacy interrupts\n");
412 adapter->msix_entries[0].vector = pdev->irq;
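/*
 * Interrupt setup policy: prefer MSI-X with a ring count derived from the
 * number of online CPUs, and fall back to MSI or legacy INTx (via
 * qlcnic_enable_msi_legacy) when MSI-X is unsupported or cannot be enabled.
 */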
416 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
420 if (adapter->msix_supported) {
421 num_msix = (num_online_cpus() >=
422 QLCNIC_DEF_NUM_STS_DESC_RINGS) ?
423 QLCNIC_DEF_NUM_STS_DESC_RINGS :
424 QLCNIC_MIN_NUM_RSS_RINGS;
428 if (!qlcnic_enable_msix(adapter, num_msix))
431 qlcnic_enable_msi_legacy(adapter);
435 qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
437 if (adapter->flags & QLCNIC_MSIX_ENABLED)
438 pci_disable_msix(adapter->pdev);
439 if (adapter->flags & QLCNIC_MSI_ENABLED)
440 pci_disable_msi(adapter->pdev);
444 qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
446 if (adapter->ahw->pci_base0 != NULL)
447 iounmap(adapter->ahw->pci_base0);
451 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
453 struct qlcnic_pci_info *pci_info;
457 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
461 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
462 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
463 if (!adapter->npars) {
468 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
469 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
470 if (!adapter->eswitch) {
475 ret = qlcnic_get_pci_info(adapter, pci_info);
479 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
480 pfn = pci_info[i].id;
481 if (pfn > QLCNIC_MAX_PCI_FUNC) {
482 ret = QL_STATUS_INVALID_PARAM;
485 adapter->npars[pfn].active = (u8)pci_info[i].active;
486 adapter->npars[pfn].type = (u8)pci_info[i].type;
487 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
488 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
489 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
492 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
493 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
499 kfree(adapter->eswitch);
500 adapter->eswitch = NULL;
502 kfree(adapter->npars);
503 adapter->npars = NULL;
511 qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
516 u32 data = QLCNIC_MGMT_FUNC;
517 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
519 /* If the other functions' drivers are not in use, set their privilege level */
520 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
521 ret = qlcnic_api_lock(adapter);
525 if (qlcnic_config_npars) {
526 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
528 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
529 id == adapter->ahw->pci_func)
531 data |= (qlcnic_config_npars &
532 QLC_DEV_SET_DRV(0xf, id));
535 data = readl(priv_op);
536 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
537 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
538 adapter->ahw->pci_func));
540 writel(data, priv_op);
541 qlcnic_api_unlock(adapter);
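/*
 * Derive the PCI function number from the device's MSI-X table offset and
 * read the driver-operating-mode CRB register to determine this function's
 * privilege level; non-privileged functions are bound to the VF ops template.
 */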
547 qlcnic_check_vf(struct qlcnic_adapter *adapter)
549 void __iomem *msix_base_addr;
550 void __iomem *priv_op;
553 u32 op_mode, priv_level;
555 /* Determine FW API version */
556 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
559 /* Find PCI function number */
560 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
561 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
562 msix_base = readl(msix_base_addr);
563 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
564 adapter->ahw->pci_func = func;
566 /* Determine function privilege level */
567 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
568 op_mode = readl(priv_op);
569 if (op_mode == QLC_DEV_DRV_DEFAULT)
570 priv_level = QLCNIC_MGMT_FUNC;
572 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
574 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
575 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
576 dev_info(&adapter->pdev->dev,
577 "HAL Version: %d Non Privileged function\n",
578 adapter->fw_hal_version);
579 adapter->nic_ops = &qlcnic_vf_ops;
581 adapter->nic_ops = &qlcnic_ops;
585 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
587 void __iomem *mem_ptr0 = NULL;
588 resource_size_t mem_base;
589 unsigned long mem_len, pci_len0 = 0;
591 struct pci_dev *pdev = adapter->pdev;
593 /* remap phys address */
594 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
595 mem_len = pci_resource_len(pdev, 0);
597 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
599 mem_ptr0 = pci_ioremap_bar(pdev, 0);
600 if (mem_ptr0 == NULL) {
601 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
609 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
611 adapter->ahw->pci_base0 = mem_ptr0;
612 adapter->ahw->pci_len0 = pci_len0;
614 qlcnic_check_vf(adapter);
616 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
617 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
618 adapter->ahw->pci_func)));
623 static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
625 struct pci_dev *pdev = adapter->pdev;
628 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
629 if (qlcnic_boards[i].vendor == pdev->vendor &&
630 qlcnic_boards[i].device == pdev->device &&
631 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
632 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
633 sprintf(name, "%pM: %s", adapter->mac_addr,
635 qlcnic_boards[i].short_name);
643 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
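/*
 * Read the running firmware version and size the receive rings: 10G ports
 * get the large (or VF-sized, when the eswitch is enabled) descriptor
 * counts, 1G ports the smaller defaults.
 */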
647 qlcnic_check_options(struct qlcnic_adapter *adapter)
649 u32 fw_major, fw_minor, fw_build;
650 struct pci_dev *pdev = adapter->pdev;
652 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
653 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
654 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
656 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
658 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
659 fw_major, fw_minor, fw_build);
660 if (adapter->ahw->port_type == QLCNIC_XGBE) {
661 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
662 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
663 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
665 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
666 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
669 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
670 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
672 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
673 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
674 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
675 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
676 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
679 adapter->msix_supported = !!use_msi_x;
681 adapter->num_txd = MAX_CMD_DESCRIPTORS;
683 adapter->max_rds_rings = MAX_RDS_RINGS;
687 qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
690 struct qlcnic_info nic_info;
692 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
696 adapter->physical_port = (u8)nic_info.phys_port;
697 adapter->switch_mode = nic_info.switch_mode;
698 adapter->max_tx_ques = nic_info.max_tx_ques;
699 adapter->max_rx_ques = nic_info.max_rx_ques;
700 adapter->capabilities = nic_info.capabilities;
701 adapter->max_mac_filters = nic_info.max_mac_filters;
702 adapter->max_mtu = nic_info.max_mtu;
704 if (adapter->capabilities & BIT_6)
705 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
707 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
713 qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
714 struct qlcnic_esw_func_cfg *esw_cfg)
716 if (esw_cfg->discard_tagged)
717 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
719 adapter->flags |= QLCNIC_TAGGING_ENABLED;
721 if (esw_cfg->vlan_id)
722 adapter->pvid = esw_cfg->vlan_id;
728 qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
730 struct qlcnic_adapter *adapter = netdev_priv(netdev);
731 set_bit(vid, adapter->vlans);
735 qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
737 struct qlcnic_adapter *adapter = netdev_priv(netdev);
739 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
740 clear_bit(vid, adapter->vlans);
744 qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
745 struct qlcnic_esw_func_cfg *esw_cfg)
747 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
748 QLCNIC_PROMISC_DISABLED);
750 if (esw_cfg->mac_anti_spoof)
751 adapter->flags |= QLCNIC_MACSPOOF;
753 if (!esw_cfg->mac_override)
754 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
756 if (!esw_cfg->promisc_mode)
757 adapter->flags |= QLCNIC_PROMISC_DISABLED;
759 qlcnic_set_netdev_features(adapter, esw_cfg);
763 qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
765 struct qlcnic_esw_func_cfg esw_cfg;
767 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
770 esw_cfg.pci_func = adapter->ahw->pci_func;
771 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
773 qlcnic_set_vlan_config(adapter, &esw_cfg);
774 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
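/*
 * Translate the per-function eswitch configuration into netdev feature
 * flags: offload_flags BIT_0 enables the checksum/SG/GRO offload set, while
 * BIT_1 and BIT_2 gate TSO and TSO6 respectively.
 */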
780 qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
781 struct qlcnic_esw_func_cfg *esw_cfg)
783 struct net_device *netdev = adapter->netdev;
784 unsigned long features, vlan_features;
786 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
787 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
788 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
789 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
791 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
792 features |= (NETIF_F_TSO | NETIF_F_TSO6);
793 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
796 if (netdev->features & NETIF_F_LRO)
797 features |= NETIF_F_LRO;
799 if (esw_cfg->offload_flags & BIT_0) {
800 netdev->features |= features;
801 if (!(esw_cfg->offload_flags & BIT_1))
802 netdev->features &= ~NETIF_F_TSO;
803 if (!(esw_cfg->offload_flags & BIT_2))
804 netdev->features &= ~NETIF_F_TSO6;
806 netdev->features &= ~features;
809 netdev->vlan_features = (features & vlan_features);
813 qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
815 void __iomem *priv_op;
816 u32 op_mode, priv_level;
819 err = qlcnic_initialize_nic(adapter);
823 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
826 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
827 op_mode = readl(priv_op);
828 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
830 if (op_mode == QLC_DEV_DRV_DEFAULT)
831 priv_level = QLCNIC_MGMT_FUNC;
833 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
835 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
836 if (priv_level == QLCNIC_MGMT_FUNC) {
837 adapter->op_mode = QLCNIC_MGMT_FUNC;
838 err = qlcnic_init_pci_info(adapter);
841 /* Set privilege level for other functions */
842 qlcnic_set_function_modes(adapter);
843 dev_info(&adapter->pdev->dev,
844 "HAL Version: %d, Management function\n",
845 adapter->fw_hal_version);
846 } else if (priv_level == QLCNIC_PRIV_FUNC) {
847 adapter->op_mode = QLCNIC_PRIV_FUNC;
848 dev_info(&adapter->pdev->dev,
849 "HAL Version: %d, Privileged function\n",
850 adapter->fw_hal_version);
854 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
860 qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
862 struct qlcnic_esw_func_cfg esw_cfg;
863 struct qlcnic_npar_info *npar;
866 if (adapter->need_fw_reset)
869 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
870 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
872 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
873 esw_cfg.pci_func = i;
874 esw_cfg.offload_flags = BIT_0;
875 esw_cfg.mac_override = BIT_0;
876 esw_cfg.promisc_mode = BIT_0;
877 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
878 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
879 if (qlcnic_config_switch_port(adapter, &esw_cfg))
881 npar = &adapter->npars[i];
882 npar->pvid = esw_cfg.vlan_id;
883 npar->mac_override = esw_cfg.mac_override;
884 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
885 npar->discard_tagged = esw_cfg.discard_tagged;
886 npar->promisc_mode = esw_cfg.promisc_mode;
887 npar->offload_flags = esw_cfg.offload_flags;
894 qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
895 struct qlcnic_npar_info *npar, int pci_func)
897 struct qlcnic_esw_func_cfg esw_cfg;
898 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
899 esw_cfg.pci_func = pci_func;
900 esw_cfg.vlan_id = npar->pvid;
901 esw_cfg.mac_override = npar->mac_override;
902 esw_cfg.discard_tagged = npar->discard_tagged;
903 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
904 esw_cfg.offload_flags = npar->offload_flags;
905 esw_cfg.promisc_mode = npar->promisc_mode;
906 if (qlcnic_config_switch_port(adapter, &esw_cfg))
909 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
910 if (qlcnic_config_switch_port(adapter, &esw_cfg))
917 qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
920 struct qlcnic_npar_info *npar;
921 struct qlcnic_info nic_info;
923 if (!adapter->need_fw_reset)
926 /* Set the NPAR config data after FW reset */
927 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
928 npar = &adapter->npars[i];
929 if (npar->type != QLCNIC_TYPE_NIC)
931 err = qlcnic_get_nic_info(adapter, &nic_info, i);
934 nic_info.min_tx_bw = npar->min_bw;
935 nic_info.max_tx_bw = npar->max_bw;
936 err = qlcnic_set_nic_info(adapter, &nic_info);
940 if (npar->enable_pm) {
941 err = qlcnic_config_port_mirroring(adapter,
942 npar->dest_npar, 1, i);
946 err = qlcnic_reset_eswitch_config(adapter, npar, i);
953 static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
955 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
958 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
961 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
962 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
964 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
966 if (!npar_opt_timeo) {
967 dev_err(&adapter->pdev->dev,
968 "Waiting for NPAR state to opertional timeout\n");
975 qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
979 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
980 adapter->op_mode != QLCNIC_MGMT_FUNC)
983 err = qlcnic_set_default_offload_settings(adapter);
987 err = qlcnic_reset_npar_config(adapter);
991 qlcnic_dev_set_npar_ready(adapter);
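/*
 * Firmware bring-up: check whether this function may load firmware, validate
 * the flash firmware version, initialize from ROM, load and start the image,
 * mark the device READY in the IDC state register, then apply eswitch and
 * NPAR management settings.
 */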
997 qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1001 err = qlcnic_can_start_firmware(adapter);
1005 goto check_fw_status;
1008 qlcnic_request_firmware(adapter);
1010 err = qlcnic_check_flash_fw_ver(adapter);
1014 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
1017 err = qlcnic_need_fw_reset(adapter);
1019 goto check_fw_status;
1021 err = qlcnic_pinit_from_rom(adapter);
1025 err = qlcnic_load_firmware(adapter);
1029 qlcnic_release_firmware(adapter);
1030 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
1033 err = qlcnic_check_fw_status(adapter);
1037 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
1038 qlcnic_idc_debug_info(adapter, 1);
1040 err = qlcnic_check_eswitch_mode(adapter);
1042 dev_err(&adapter->pdev->dev,
1043 "Memory allocation failed for eswitch\n");
1046 err = qlcnic_set_mgmt_operations(adapter);
1050 qlcnic_check_options(adapter);
1051 adapter->need_fw_reset = 0;
1053 qlcnic_release_firmware(adapter);
1057 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1058 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1060 qlcnic_release_firmware(adapter);
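/*
 * Pick the interrupt handler that matches the enabled interrupt mode
 * (MSI-X, MSI or shared legacy INTx; the diagnostic handler is used during
 * the interrupt self-test) and request one IRQ per SDS ring.
 */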
1065 qlcnic_request_irq(struct qlcnic_adapter *adapter)
1067 irq_handler_t handler;
1068 struct qlcnic_host_sds_ring *sds_ring;
1071 unsigned long flags = 0;
1072 struct net_device *netdev = adapter->netdev;
1073 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1075 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1076 handler = qlcnic_tmp_intr;
1077 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1078 flags |= IRQF_SHARED;
1081 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1082 handler = qlcnic_msix_intr;
1083 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1084 handler = qlcnic_msi_intr;
1086 flags |= IRQF_SHARED;
1087 handler = qlcnic_intr;
1090 adapter->irq = netdev->irq;
1092 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1093 sds_ring = &recv_ctx->sds_rings[ring];
1094 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1095 err = request_irq(sds_ring->irq, handler,
1096 flags, sds_ring->name, sds_ring);
1105 qlcnic_free_irq(struct qlcnic_adapter *adapter)
1108 struct qlcnic_host_sds_ring *sds_ring;
1110 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1112 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1113 sds_ring = &recv_ctx->sds_rings[ring];
1114 free_irq(sds_ring->irq, sds_ring);
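/*
 * Bring the data path up: create the firmware RX/TX context, post receive
 * buffers, program the multicast list, MTU, RSS and interrupt coalescing,
 * then enable NAPI and request a link-status event from firmware.
 */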
1119 __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1122 struct qlcnic_host_rds_ring *rds_ring;
1124 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1127 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1129 if (qlcnic_set_eswitch_port_config(adapter))
1132 if (qlcnic_fw_create_ctx(adapter))
1135 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1136 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1137 qlcnic_post_rx_buffers(adapter, rds_ring);
1140 qlcnic_set_multi(netdev);
1141 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1143 adapter->ahw->linkup = 0;
1145 if (adapter->max_sds_rings > 1)
1146 qlcnic_config_rss(adapter, 1);
1148 qlcnic_config_intr_coalesce(adapter);
1150 if (netdev->features & NETIF_F_LRO)
1151 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1153 qlcnic_napi_enable(adapter);
1155 qlcnic_linkevent_request(adapter, 1);
1157 adapter->reset_context = 0;
1158 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1162 /* Used during resume and by the firmware recovery path. */
1165 qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1170 if (netif_running(netdev))
1171 err = __qlcnic_up(adapter, netdev);
1178 __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1180 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1183 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1187 spin_lock(&adapter->tx_clean_lock);
1188 netif_carrier_off(netdev);
1189 netif_tx_disable(netdev);
1191 qlcnic_free_mac_list(adapter);
1193 if (adapter->fhash.fnum)
1194 qlcnic_delete_lb_filters(adapter);
1196 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1198 qlcnic_napi_disable(adapter);
1200 qlcnic_fw_destroy_ctx(adapter);
1202 qlcnic_reset_rx_buffers_list(adapter);
1203 qlcnic_release_tx_buffers(adapter);
1204 spin_unlock(&adapter->tx_clean_lock);
1207 /* Used during suspend and by the firmware recovery path. */
1210 qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1213 if (netif_running(netdev))
1214 __qlcnic_down(adapter, netdev);
1220 qlcnic_attach(struct qlcnic_adapter *adapter)
1222 struct net_device *netdev = adapter->netdev;
1223 struct pci_dev *pdev = adapter->pdev;
1226 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1229 err = qlcnic_napi_add(adapter, netdev);
1233 err = qlcnic_alloc_sw_resources(adapter);
1235 dev_err(&pdev->dev, "Error in setting up sw resources\n");
1236 goto err_out_napi_del;
1239 err = qlcnic_alloc_hw_resources(adapter);
1241 dev_err(&pdev->dev, "Error in setting up hw resources\n");
1242 goto err_out_free_sw;
1245 err = qlcnic_request_irq(adapter);
1247 dev_err(&pdev->dev, "failed to setup interrupt\n");
1248 goto err_out_free_hw;
1251 qlcnic_create_sysfs_entries(adapter);
1253 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1257 qlcnic_free_hw_resources(adapter);
1259 qlcnic_free_sw_resources(adapter);
1261 qlcnic_napi_del(adapter);
1266 qlcnic_detach(struct qlcnic_adapter *adapter)
1268 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1271 qlcnic_remove_sysfs_entries(adapter);
1273 qlcnic_free_hw_resources(adapter);
1274 qlcnic_release_rx_buffers(adapter);
1275 qlcnic_free_irq(adapter);
1276 qlcnic_napi_del(adapter);
1277 qlcnic_free_sw_resources(adapter);
1282 void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1284 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1285 struct qlcnic_host_sds_ring *sds_ring;
1288 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1289 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1290 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1291 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1292 qlcnic_disable_int(sds_ring);
1296 qlcnic_fw_destroy_ctx(adapter);
1298 qlcnic_detach(adapter);
1300 adapter->diag_test = 0;
1301 adapter->max_sds_rings = max_sds_rings;
1303 if (qlcnic_attach(adapter))
1306 if (netif_running(netdev))
1307 __qlcnic_up(adapter, netdev);
1309 netif_device_attach(netdev);
1312 static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1315 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1317 if (!adapter->ahw) {
1318 dev_err(&adapter->pdev->dev,
1319 "Failed to allocate recv ctx resources for adapter\n");
1323 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1325 if (!adapter->recv_ctx) {
1326 dev_err(&adapter->pdev->dev,
1327 "Failed to allocate recv ctx resources for adapter\n");
1328 kfree(adapter->ahw);
1329 adapter->ahw = NULL;
1333 /* Initialize interrupt coalesce parameters */
1334 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1335 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1336 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1341 static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1343 kfree(adapter->recv_ctx);
1344 adapter->recv_ctx = NULL;
1346 if (adapter->ahw->fw_dump.tmpl_hdr) {
1347 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1348 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1350 kfree(adapter->ahw);
1351 adapter->ahw = NULL;
1354 int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1356 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1357 struct qlcnic_host_sds_ring *sds_ring;
1358 struct qlcnic_host_rds_ring *rds_ring;
1362 netif_device_detach(netdev);
1364 if (netif_running(netdev))
1365 __qlcnic_down(adapter, netdev);
1367 qlcnic_detach(adapter);
1369 adapter->max_sds_rings = 1;
1370 adapter->diag_test = test;
1372 ret = qlcnic_attach(adapter);
1374 netif_device_attach(netdev);
1378 ret = qlcnic_fw_create_ctx(adapter);
1380 qlcnic_detach(adapter);
1381 netif_device_attach(netdev);
1385 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1386 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1387 qlcnic_post_rx_buffers(adapter, rds_ring);
1390 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1391 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1392 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1393 qlcnic_enable_int(sds_ring);
1396 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1401 /* Reset context in hardware only */
1403 qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1405 struct net_device *netdev = adapter->netdev;
1407 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1410 netif_device_detach(netdev);
1412 qlcnic_down(adapter, netdev);
1414 qlcnic_up(adapter, netdev);
1416 netif_device_attach(netdev);
1418 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1423 qlcnic_reset_context(struct qlcnic_adapter *adapter)
1426 struct net_device *netdev = adapter->netdev;
1428 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1431 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1433 netif_device_detach(netdev);
1435 if (netif_running(netdev))
1436 __qlcnic_down(adapter, netdev);
1438 qlcnic_detach(adapter);
1440 if (netif_running(netdev)) {
1441 err = qlcnic_attach(adapter);
1443 __qlcnic_up(adapter, netdev);
1446 netif_device_attach(netdev);
1449 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1454 qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1455 struct net_device *netdev, u8 pci_using_dac)
1458 struct pci_dev *pdev = adapter->pdev;
1460 adapter->mc_enabled = 0;
1461 adapter->max_mc_count = 38;
1463 netdev->netdev_ops = &qlcnic_netdev_ops;
1464 netdev->watchdog_timeo = 5*HZ;
1466 qlcnic_change_mtu(netdev, netdev->mtu);
1468 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1470 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1471 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1473 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1474 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1476 netdev->hw_features |= NETIF_F_HIGHDMA;
1478 netdev->vlan_features = netdev->hw_features;
1480 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1481 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1482 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1483 netdev->hw_features |= NETIF_F_LRO;
1485 netdev->features |= netdev->hw_features |
1486 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1488 netdev->irq = adapter->msix_entries[0].vector;
1490 netif_carrier_off(netdev);
1492 err = register_netdev(netdev);
1494 dev_err(&pdev->dev, "failed to register net device\n");
1501 static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1503 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1504 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1506 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1507 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1510 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1518 qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1520 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1523 if (adapter->msix_entries)
1526 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
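/*
 * PCI probe: enable the device, map BAR 0, start firmware via the nic_ops
 * hook, read the MAC address, set up interrupts and register the
 * net_device; a periodic firmware-poll work item is scheduled before the
 * probe returns.
 */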
1530 static int __devinit
1531 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1533 struct net_device *netdev = NULL;
1534 struct qlcnic_adapter *adapter = NULL;
1536 uint8_t revision_id;
1537 uint8_t pci_using_dac;
1538 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1540 err = pci_enable_device(pdev);
1544 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1546 goto err_out_disable_pdev;
1549 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1551 goto err_out_disable_pdev;
1553 err = pci_request_regions(pdev, qlcnic_driver_name);
1555 goto err_out_disable_pdev;
1557 pci_set_master(pdev);
1558 pci_enable_pcie_error_reporting(pdev);
1560 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1562 dev_err(&pdev->dev, "failed to allocate net_device\n");
1564 goto err_out_free_res;
1567 SET_NETDEV_DEV(netdev, &pdev->dev);
1569 adapter = netdev_priv(netdev);
1570 adapter->netdev = netdev;
1571 adapter->pdev = pdev;
1573 if (qlcnic_alloc_adapter_resources(adapter))
1574 goto err_out_free_netdev;
1576 adapter->dev_rst_time = jiffies;
1577 revision_id = pdev->revision;
1578 adapter->ahw->revision_id = revision_id;
1580 rwlock_init(&adapter->ahw->crb_lock);
1581 mutex_init(&adapter->ahw->mem_lock);
1583 spin_lock_init(&adapter->tx_clean_lock);
1584 INIT_LIST_HEAD(&adapter->mac_list);
1586 err = qlcnic_setup_pci_map(adapter);
1588 goto err_out_free_hw;
1590 /* This will be reset for mezz cards */
1591 adapter->portnum = adapter->ahw->pci_func;
1593 /* Get FW dump template and store it */
1594 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
1595 qlcnic_fw_cmd_get_minidump_temp(adapter);
1597 err = qlcnic_get_board_info(adapter);
1599 dev_err(&pdev->dev, "Error getting board config info.\n");
1600 goto err_out_iounmap;
1603 err = qlcnic_setup_idc_param(adapter);
1605 goto err_out_iounmap;
1607 adapter->flags |= QLCNIC_NEED_FLR;
1609 err = adapter->nic_ops->start_firmware(adapter);
1611 dev_err(&pdev->dev, "Loading firmware failed. Please reboot.\n");
1612 goto err_out_decr_ref;
1615 if (qlcnic_read_mac_addr(adapter))
1616 dev_warn(&pdev->dev, "failed to read mac addr\n");
1618 if (adapter->portnum == 0) {
1619 get_brd_name(adapter, brd_name);
1621 pr_info("%s: %s Board Chip rev 0x%x\n",
1622 module_name(THIS_MODULE),
1623 brd_name, adapter->ahw->revision_id);
1626 qlcnic_clear_stats(adapter);
1628 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1630 goto err_out_decr_ref;
1632 qlcnic_setup_intr(adapter);
1634 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1636 goto err_out_disable_msi;
1638 pci_set_drvdata(pdev, adapter);
1640 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1642 switch (adapter->ahw->port_type) {
1644 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1645 adapter->netdev->name);
1648 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1649 adapter->netdev->name);
1653 qlcnic_alloc_lb_filters_mem(adapter);
1654 qlcnic_create_diag_entries(adapter);
1658 err_out_disable_msi:
1659 qlcnic_teardown_intr(adapter);
1660 kfree(adapter->msix_entries);
1663 qlcnic_clr_all_drv_state(adapter, 0);
1666 qlcnic_cleanup_pci_map(adapter);
1669 qlcnic_free_adapter_resources(adapter);
1671 err_out_free_netdev:
1672 free_netdev(netdev);
1675 pci_release_regions(pdev);
1677 err_out_disable_pdev:
1678 pci_set_drvdata(pdev, NULL);
1679 pci_disable_device(pdev);
1683 static void __devexit qlcnic_remove(struct pci_dev *pdev)
1685 struct qlcnic_adapter *adapter;
1686 struct net_device *netdev;
1688 adapter = pci_get_drvdata(pdev);
1689 if (adapter == NULL)
1692 netdev = adapter->netdev;
1694 qlcnic_cancel_fw_work(adapter);
1696 unregister_netdev(netdev);
1698 qlcnic_detach(adapter);
1700 if (adapter->npars != NULL)
1701 kfree(adapter->npars);
1702 if (adapter->eswitch != NULL)
1703 kfree(adapter->eswitch);
1705 qlcnic_clr_all_drv_state(adapter, 0);
1707 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1709 qlcnic_free_lb_filters_mem(adapter);
1711 qlcnic_teardown_intr(adapter);
1712 kfree(adapter->msix_entries);
1714 qlcnic_remove_diag_entries(adapter);
1716 qlcnic_cleanup_pci_map(adapter);
1718 qlcnic_release_firmware(adapter);
1720 pci_disable_pcie_error_reporting(pdev);
1721 pci_release_regions(pdev);
1722 pci_disable_device(pdev);
1723 pci_set_drvdata(pdev, NULL);
1725 qlcnic_free_adapter_resources(adapter);
1726 free_netdev(netdev);
1728 static int __qlcnic_shutdown(struct pci_dev *pdev)
1730 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1731 struct net_device *netdev = adapter->netdev;
1734 netif_device_detach(netdev);
1736 qlcnic_cancel_fw_work(adapter);
1738 if (netif_running(netdev))
1739 qlcnic_down(adapter, netdev);
1741 qlcnic_clr_all_drv_state(adapter, 0);
1743 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1745 retval = pci_save_state(pdev);
1749 if (qlcnic_wol_supported(adapter)) {
1750 pci_enable_wake(pdev, PCI_D3cold, 1);
1751 pci_enable_wake(pdev, PCI_D3hot, 1);
1757 static void qlcnic_shutdown(struct pci_dev *pdev)
1759 if (__qlcnic_shutdown(pdev))
1762 pci_disable_device(pdev);
1767 qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1771 retval = __qlcnic_shutdown(pdev);
1775 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1780 qlcnic_resume(struct pci_dev *pdev)
1782 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1783 struct net_device *netdev = adapter->netdev;
1786 err = pci_enable_device(pdev);
1790 pci_set_power_state(pdev, PCI_D0);
1791 pci_set_master(pdev);
1792 pci_restore_state(pdev);
1794 err = adapter->nic_ops->start_firmware(adapter);
1796 dev_err(&pdev->dev, "failed to start firmware\n");
1800 if (netif_running(netdev)) {
1801 err = qlcnic_up(adapter, netdev);
1805 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1808 netif_device_attach(netdev);
1809 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1814 static int qlcnic_open(struct net_device *netdev)
1816 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1819 err = qlcnic_attach(adapter);
1823 err = __qlcnic_up(adapter, netdev);
1827 netif_start_queue(netdev);
1832 qlcnic_detach(adapter);
1837 * qlcnic_close - Disables a network interface entry point
1839 static int qlcnic_close(struct net_device *netdev)
1841 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1843 __qlcnic_down(adapter, netdev);
1848 qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1853 if (!qlcnic_mac_learn)
1856 spin_lock_init(&adapter->mac_learn_lock);
1858 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1863 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1864 adapter->fhash.fhead = (struct hlist_head *)head;
1866 for (i = 0; i < adapter->fhash.fmax; i++)
1867 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1870 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1872 if (adapter->fhash.fmax && adapter->fhash.fhead)
1873 kfree(adapter->fhash.fhead);
1875 adapter->fhash.fhead = NULL;
1876 adapter->fhash.fmax = 0;
1879 static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1880 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1882 struct cmd_desc_type0 *hwdesc;
1883 struct qlcnic_nic_req *req;
1884 struct qlcnic_mac_req *mac_req;
1885 struct qlcnic_vlan_req *vlan_req;
1889 producer = tx_ring->producer;
1890 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1892 req = (struct qlcnic_nic_req *)hwdesc;
1893 memset(req, 0, sizeof(struct qlcnic_nic_req));
1894 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1896 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1897 req->req_hdr = cpu_to_le64(word);
1899 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
1900 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1901 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1903 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1904 vlan_req->vlan_id = vlan_id;
1906 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
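/*
 * Source-MAC learning: on transmit the source address (and VLAN, on
 * eswitch-enabled NPAR devices) is hashed into fhash and, for new or aged
 * entries, a MAC_ADD/MAC_VLAN_ADD request descriptor is queued so firmware
 * can update its loopback (LB) filter table.
 */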
1910 #define QLCNIC_MAC_HASH(MAC)\
1911 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1914 qlcnic_send_filter(struct qlcnic_adapter *adapter,
1915 struct qlcnic_host_tx_ring *tx_ring,
1916 struct cmd_desc_type0 *first_desc,
1917 struct sk_buff *skb)
1919 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1920 struct qlcnic_filter *fil, *tmp_fil;
1921 struct hlist_node *tmp_hnode, *n;
1922 struct hlist_head *head;
1927 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1930 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1933 /* Only NPAR-capable devices support VLAN-based learning */
1934 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1935 vlan_id = first_desc->vlan_TCI;
1936 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1937 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1938 head = &(adapter->fhash.fhead[hindex]);
1940 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1941 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1942 tmp_fil->vlan_id == vlan_id) {
1945 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1946 qlcnic_change_filter(adapter, src_addr, vlan_id,
1948 tmp_fil->ftime = jiffies;
1953 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1957 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
1959 fil->ftime = jiffies;
1960 fil->vlan_id = vlan_id;
1961 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1962 spin_lock(&adapter->mac_learn_lock);
1963 hlist_add_head(&(fil->fnode), head);
1964 adapter->fhash.fnum++;
1965 spin_unlock(&adapter->mac_learn_lock);
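/*
 * Fill the protocol fields of the first TX descriptor: handle inline and
 * out-of-band VLAN tags (or force the port PVID), and either set up an LSO
 * descriptor, copying the MAC/IP/TCP headers into the ring as required by
 * the firmware, or select the appropriate checksum-offload opcode.
 */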
1969 qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
1970 struct cmd_desc_type0 *first_desc,
1971 struct sk_buff *skb)
1973 u8 opcode = 0, hdr_len = 0;
1974 u16 flags = 0, vlan_tci = 0;
1975 int copied, offset, copy_len;
1976 struct cmd_desc_type0 *hwdesc;
1977 struct vlan_ethhdr *vh;
1978 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1979 u16 protocol = ntohs(skb->protocol);
1980 u32 producer = tx_ring->producer;
1982 if (protocol == ETH_P_8021Q) {
1983 vh = (struct vlan_ethhdr *)skb->data;
1984 flags = FLAGS_VLAN_TAGGED;
1985 vlan_tci = vh->h_vlan_TCI;
1986 } else if (vlan_tx_tag_present(skb)) {
1987 flags = FLAGS_VLAN_OOB;
1988 vlan_tci = vlan_tx_tag_get(skb);
1990 if (unlikely(adapter->pvid)) {
1991 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1993 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
1996 flags = FLAGS_VLAN_OOB;
1997 vlan_tci = adapter->pvid;
2000 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2001 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2003 if (*(skb->data) & BIT_0) {
2005 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2007 opcode = TX_ETHER_PKT;
2008 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
2009 skb_shinfo(skb)->gso_size > 0) {
2011 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2013 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2014 first_desc->total_hdr_length = hdr_len;
2016 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2018 /* For LSO, we need to copy the MAC/IP/TCP headers into
2019 * the descriptor ring */
2023 if (flags & FLAGS_VLAN_OOB) {
2024 first_desc->total_hdr_length += VLAN_HLEN;
2025 first_desc->tcp_hdr_offset = VLAN_HLEN;
2026 first_desc->ip_hdr_offset = VLAN_HLEN;
2027 /* Only in case of TSO on vlan device */
2028 flags |= FLAGS_VLAN_TAGGED;
2030 /* Create a TSO vlan header template for firmware */
2032 hwdesc = &tx_ring->desc_head[producer];
2033 tx_ring->cmd_buf_arr[producer].skb = NULL;
2035 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2036 offset, hdr_len + VLAN_HLEN);
2038 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2039 skb_copy_from_linear_data(skb, vh, 12);
2040 vh->h_vlan_proto = htons(ETH_P_8021Q);
2041 vh->h_vlan_TCI = htons(vlan_tci);
2043 skb_copy_from_linear_data_offset(skb, 12,
2044 (char *)vh + 16, copy_len - 16);
2046 copied = copy_len - VLAN_HLEN;
2049 producer = get_next_index(producer, tx_ring->num_desc);
2052 while (copied < hdr_len) {
2054 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2055 offset, (hdr_len - copied));
2057 hwdesc = &tx_ring->desc_head[producer];
2058 tx_ring->cmd_buf_arr[producer].skb = NULL;
2060 skb_copy_from_linear_data_offset(skb, copied,
2061 (char *) hwdesc + offset, copy_len);
2066 producer = get_next_index(producer, tx_ring->num_desc);
2069 tx_ring->producer = producer;
2071 adapter->stats.lso_frames++;
2073 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2076 if (protocol == ETH_P_IP) {
2077 l4proto = ip_hdr(skb)->protocol;
2079 if (l4proto == IPPROTO_TCP)
2080 opcode = TX_TCP_PKT;
2081 else if (l4proto == IPPROTO_UDP)
2082 opcode = TX_UDP_PKT;
2083 } else if (protocol == ETH_P_IPV6) {
2084 l4proto = ipv6_hdr(skb)->nexthdr;
2086 if (l4proto == IPPROTO_TCP)
2087 opcode = TX_TCPV6_PKT;
2088 else if (l4proto == IPPROTO_UDP)
2089 opcode = TX_UDPV6_PKT;
2092 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2093 first_desc->ip_hdr_offset += skb_network_offset(skb);
2094 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2100 qlcnic_map_tx_skb(struct pci_dev *pdev,
2101 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2103 struct qlcnic_skb_frag *nf;
2104 struct skb_frag_struct *frag;
2108 nr_frags = skb_shinfo(skb)->nr_frags;
2109 nf = &pbuf->frag_array[0];
2111 map = pci_map_single(pdev, skb->data,
2112 skb_headlen(skb), PCI_DMA_TODEVICE);
2113 if (pci_dma_mapping_error(pdev, map))
2117 nf->length = skb_headlen(skb);
2119 for (i = 0; i < nr_frags; i++) {
2120 frag = &skb_shinfo(skb)->frags[i];
2121 nf = &pbuf->frag_array[i+1];
2123 map = pci_map_page(pdev, frag->page, frag->page_offset,
2124 frag->size, PCI_DMA_TODEVICE);
2125 if (pci_dma_mapping_error(pdev, map))
2129 nf->length = frag->size;
2136 nf = &pbuf->frag_array[i+1];
2137 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2140 nf = &pbuf->frag_array[0];
2141 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2148 qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2149 struct qlcnic_cmd_buffer *pbuf)
2151 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2152 int nr_frags = skb_shinfo(skb)->nr_frags;
2155 for (i = 0; i < nr_frags; i++) {
2156 nf = &pbuf->frag_array[i+1];
2157 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2160 nf = &pbuf->frag_array[0];
2161 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2166 qlcnic_clear_cmddesc(u64 *desc)
2174 qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2176 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2177 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2178 struct qlcnic_cmd_buffer *pbuf;
2179 struct qlcnic_skb_frag *buffrag;
2180 struct cmd_desc_type0 *hwdesc, *first_desc;
2181 struct pci_dev *pdev;
2182 struct ethhdr *phdr;
2188 u32 num_txd = tx_ring->num_desc;
2190 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2191 netif_stop_queue(netdev);
2192 return NETDEV_TX_BUSY;
2195 if (adapter->flags & QLCNIC_MACSPOOF) {
2196 phdr = (struct ethhdr *)skb->data;
2197 if (compare_ether_addr(phdr->h_source,
2202 frag_count = skb_shinfo(skb)->nr_frags + 1;
2203 /* 14 frags supported for normal packet and
2204 * 32 frags supported for TSO packet
2206 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2208 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2209 delta += skb_shinfo(skb)->frags[i].size;
2211 if (!__pskb_pull_tail(skb, delta))
2214 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2217 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
2218 netif_stop_queue(netdev);
2219 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2220 netif_start_queue(netdev);
2222 adapter->stats.xmit_off++;
2223 return NETDEV_TX_BUSY;
2227 producer = tx_ring->producer;
2228 pbuf = &tx_ring->cmd_buf_arr[producer];
2230 pdev = adapter->pdev;
2232 first_desc = hwdesc = &tx_ring->desc_head[producer];
2233 qlcnic_clear_cmddesc((u64 *)hwdesc);
2235 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2236 adapter->stats.tx_dma_map_error++;
2241 pbuf->frag_count = frag_count;
2243 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2244 qlcnic_set_tx_port(first_desc, adapter->portnum);
2246 for (i = 0; i < frag_count; i++) {
2250 if ((k == 0) && (i > 0)) {
2251 /* move to next desc.*/
2252 producer = get_next_index(producer, num_txd);
2253 hwdesc = &tx_ring->desc_head[producer];
2254 qlcnic_clear_cmddesc((u64 *)hwdesc);
2255 tx_ring->cmd_buf_arr[producer].skb = NULL;
2258 buffrag = &pbuf->frag_array[i];
2260 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2263 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2266 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2269 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2272 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2277 tx_ring->producer = get_next_index(producer, num_txd);
2280 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2283 if (qlcnic_mac_learn)
2284 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2286 qlcnic_update_cmd_producer(adapter, tx_ring);
2288 adapter->stats.txbytes += skb->len;
2289 adapter->stats.xmitcalled++;
2291 return NETDEV_TX_OK;
2294 qlcnic_unmap_buffers(pdev, skb, pbuf);
2296 adapter->stats.txdropped++;
2297 dev_kfree_skb_any(skb);
2298 return NETDEV_TX_OK;
2301 static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2303 struct net_device *netdev = adapter->netdev;
2304 u32 temp, temp_state, temp_val;
2307 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2309 temp_state = qlcnic_get_temp_state(temp);
2310 temp_val = qlcnic_get_temp_val(temp);
2312 if (temp_state == QLCNIC_TEMP_PANIC) {
2313 dev_err(&netdev->dev,
2314 "Device temperature %d degrees C exceeds"
2315 " maximum allowed. Hardware has been shut down.\n",
2318 } else if (temp_state == QLCNIC_TEMP_WARN) {
2319 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2320 dev_err(&netdev->dev,
2321 "Device temperature %d degrees C "
2322 "exceeds operating range."
2323 " Immediate action needed.\n",
2327 if (adapter->temp == QLCNIC_TEMP_WARN) {
2328 dev_info(&netdev->dev,
2329 "Device temperature is now %d degrees C"
2330 " in normal range.\n", temp_val);
2333 adapter->temp = temp_state;
2337 void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2339 struct net_device *netdev = adapter->netdev;
2341 if (adapter->ahw->linkup && !linkup) {
2342 netdev_info(netdev, "NIC Link is down\n");
2343 adapter->ahw->linkup = 0;
2344 if (netif_running(netdev)) {
2345 netif_carrier_off(netdev);
2346 netif_stop_queue(netdev);
2348 } else if (!adapter->ahw->linkup && linkup) {
2349 netdev_info(netdev, "NIC Link is up\n");
2350 adapter->ahw->linkup = 1;
2351 if (netif_running(netdev)) {
2352 netif_carrier_on(netdev);
2353 netif_wake_queue(netdev);
2358 static void qlcnic_tx_timeout(struct net_device *netdev)
2360 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2362 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2365 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
2367 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2368 adapter->need_fw_reset = 1;
2370 adapter->reset_context = 1;
2373 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2375 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2376 struct net_device_stats *stats = &netdev->stats;
2378 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2379 stats->tx_packets = adapter->stats.xmitfinished;
2380 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
2381 stats->tx_bytes = adapter->stats.txbytes;
2382 stats->rx_dropped = adapter->stats.rxdropped;
2383 stats->tx_dropped = adapter->stats.txdropped;
static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
status = readl(adapter->isr_int_vec);
if (!(status & adapter->int_vec_bit))
/* check interrupt state machine, to be sure */
status = readl(adapter->crb_int_state_reg);
if (!ISR_LEGACY_INT_TRIGGERED(status))
writel(0xffffffff, adapter->tgt_status_reg);
/* read twice to ensure write is flushed */
readl(adapter->isr_int_vec);
readl(adapter->isr_int_vec);
static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
struct qlcnic_host_sds_ring *sds_ring = data;
struct qlcnic_adapter *adapter = sds_ring->adapter;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
else if (adapter->flags & QLCNIC_MSI_ENABLED) {
writel(0xffffffff, adapter->tgt_status_reg);
if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
adapter->diag_cnt++;
qlcnic_enable_int(sds_ring);
static irqreturn_t qlcnic_intr(int irq, void *data)
struct qlcnic_host_sds_ring *sds_ring = data;
struct qlcnic_adapter *adapter = sds_ring->adapter;
if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
napi_schedule(&sds_ring->napi);
static irqreturn_t qlcnic_msi_intr(int irq, void *data)
struct qlcnic_host_sds_ring *sds_ring = data;
struct qlcnic_adapter *adapter = sds_ring->adapter;
/* clear interrupt */
writel(0xffffffff, adapter->tgt_status_reg);
napi_schedule(&sds_ring->napi);
static irqreturn_t qlcnic_msix_intr(int irq, void *data)
struct qlcnic_host_sds_ring *sds_ring = data;
napi_schedule(&sds_ring->napi);
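/*
 * Reclaim completed Tx work: walk the command ring from the software
 * consumer to the hardware consumer, unmap and free transmitted skbs,
 * and wake the queue once enough descriptors are available again.
 */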
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
u32 sw_consumer, hw_consumer;
struct qlcnic_cmd_buffer *buffer;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock(&adapter->tx_clean_lock))
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
while (sw_consumer != hw_consumer) {
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
frag = &buffer->frag_array[0];
pci_unmap_single(pdev, frag->dma, frag->length,
for (i = 1; i < buffer->frag_count; i++) {
pci_unmap_page(pdev, frag->dma, frag->length,
adapter->stats.xmitfinished++;
dev_kfree_skb_any(buffer->skb);
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
if (++count >= MAX_STATUS_HANDLE)
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
adapter->stats.xmit_on++;
adapter->tx_timeo_cnt = 0;
/*
 * If everything is freed up to consumer then check if the ring is full.
 * If the ring is full then check if more needs to be freed and
 * schedule the call back again.
 *
 * This happens when there are 2 CPUs. One could be freeing and the
 * other filling it. If the ring is full when we get out of here and
 * the card has already interrupted the host then the host can miss the
 * interrupt.
 *
 * There is still a possible race condition and the host could miss an
 * interrupt. The card has to take care of this.
 */
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
spin_unlock(&adapter->tx_clean_lock);
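/*
 * NAPI handlers: qlcnic_poll() services Tx completions and the Rx status
 * ring, qlcnic_rx_poll() handles receives only. Interrupts are re-enabled
 * once the budget is not exhausted and the device is still up.
 */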
static int qlcnic_poll(struct napi_struct *napi, int budget)
struct qlcnic_host_sds_ring *sds_ring =
container_of(napi, struct qlcnic_host_sds_ring, napi);
struct qlcnic_adapter *adapter = sds_ring->adapter;
tx_complete = qlcnic_process_cmd_ring(adapter);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_int(sds_ring);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
struct qlcnic_host_sds_ring *sds_ring =
container_of(napi, struct qlcnic_host_sds_ring, napi);
struct qlcnic_adapter *adapter = sds_ring->adapter;
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_int(sds_ring);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_intr(adapter->irq, sds_ring);
enable_irq(adapter->irq);
qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
val = adapter->portnum & 0xf;
val |= encoding << 7;
val |= (jiffies - adapter->dev_rst_time) << 8;
QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
adapter->dev_rst_time = jiffies;
qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
state != QLCNIC_DEV_NEED_QUISCENT);
if (qlcnic_api_lock(adapter))
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
if (qlcnic_api_lock(adapter))
val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_info(&adapter->pdev->dev,
"Device state set to Failed. Please reboot\n");
} else if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
adapter->fw_fail_cnt = 0;
clear_bit(__QLCNIC_START_FW, &adapter->state);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
/* Grab api lock before checking state */
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (((state & 0x11111111) == (act & 0x11111111)) ||
((act & 0x11111111) == ((state >> 1) & 0x11111111)))
static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
if (val != QLCNIC_DRV_IDC_VER) {
dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
" idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
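/*
 * Decide whether this function may load the firmware. The first function
 * to move the device out of the COLD state does the initialization; the
 * others wait, up to dev_init_timeo, for the device to become READY and
 * then verify the IDC version.
 */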
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
u32 val, prev_state;
u8 dev_init_timeo = adapter->dev_init_timeo;
u8 portnum = adapter->portnum;
if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
if (qlcnic_api_lock(adapter))
val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
case QLCNIC_DEV_READY:
ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
case QLCNIC_DEV_NEED_RESET:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
case QLCNIC_DEV_NEED_QUISCENT:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_QSCNT_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
case QLCNIC_DEV_FAILED:
dev_err(&adapter->pdev->dev, "Device in failed state.\n");
qlcnic_api_unlock(adapter);
case QLCNIC_DEV_INITIALIZING:
case QLCNIC_DEV_QUISCENT:
qlcnic_api_unlock(adapter);
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (prev_state == QLCNIC_DEV_QUISCENT)
} while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
if (!dev_init_timeo) {
dev_err(&adapter->pdev->dev,
"Timed out waiting for device to initialize\n");
if (qlcnic_api_lock(adapter))
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
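/*
 * Delayed work run during device reinitialization: wait for the other
 * functions to acknowledge the reset, restart the firmware if this
 * function owns the reset, or keep polling the device state until it
 * reaches READY.
 */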
qlcnic_fwinit_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
u32 dev_state = 0xf;
if (qlcnic_api_lock(adapter))
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_QUISCENT ||
dev_state == QLCNIC_DEV_NEED_QUISCENT) {
qlcnic_api_unlock(adapter);
qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
qlcnic_api_unlock(adapter);
if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
adapter->reset_ack_timeo);
goto skip_ack_check;
if (!qlcnic_check_drv_state(adapter)) {
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_NEED_RESET) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
QLCNIC_DEV_INITIALIZING);
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
QLCDB(adapter, DRV, "Take FW dump\n");
qlcnic_dump_fw(adapter);
qlcnic_api_unlock(adapter);
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
qlcnic_api_unlock(adapter);
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
case QLCNIC_DEV_READY:
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
case QLCNIC_DEV_FAILED:
qlcnic_schedule_work(adapter,
qlcnic_fwinit_work, FW_POLL_DELAY);
dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
"fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
netif_device_attach(adapter->netdev);
qlcnic_clr_all_drv_state(adapter, 0);
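/*
 * Delayed work that detaches the interface ahead of a firmware reset or
 * quiescent transition, acknowledges the new state in the driver state
 * register and then schedules qlcnic_fwinit_work() to bring the device
 * back up.
 */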
qlcnic_detach_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
/* Don't grab rtnl lock during Quiescent mode */
if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_down(adapter, netdev);
status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR)
if (adapter->temp == QLCNIC_TEMP_PANIC)
if (qlcnic_set_drv_state(adapter, adapter->dev_state))
adapter->fw_wait_cnt = 0;
qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
status, adapter->temp);
netif_device_attach(netdev);
qlcnic_clr_all_drv_state(adapter, 1);
/* Transition NPAR state to non-operational */
qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (state == QLCNIC_DEV_NPAR_NON_OPER)
if (qlcnic_api_lock(adapter))
QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
/* Transition to RESET state from READY state only */
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
adapter->need_fw_reset = 1;
if (qlcnic_api_lock(adapter))
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
QLCDB(adapter, DRV, "NEED_RESET state set\n");
qlcnic_idc_debug_info(adapter, 0);
QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
/* Transition to NPAR READY state from NPAR NOT READY state */
qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
qlcnic_schedule_work(struct qlcnic_adapter *adapter,
work_func_t func, int delay)
if (test_bit(__QLCNIC_AER, &adapter->state))
INIT_DELAYED_WORK(&adapter->fw_work, func);
queue_delayed_work(qlcnic_wq, &adapter->fw_work,
round_jiffies_relative(delay));
qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
cancel_delayed_work_sync(&adapter->fw_work);
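/*
 * Delayed work that re-attaches the interface once the firmware is back:
 * non-management functions first wait for the NPAR state to become
 * operational, then the rings are brought up and the netdev re-attached.
 */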
qlcnic_attach_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
qlcnic_clr_all_drv_state(adapter, 0);
else if (npar_state != QLCNIC_DEV_NPAR_OPER)
qlcnic_schedule_work(adapter, qlcnic_attach_work,
QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
netif_device_attach(netdev);
adapter->fw_fail_cnt = 0;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
if (!qlcnic_clr_drv_state(adapter))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
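/*
 * Health check run from the firmware poll worker: monitor temperature,
 * the device IDC state and the firmware heartbeat counter, and schedule
 * a detach/recovery cycle when a firmware hang is detected.
 */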
qlcnic_check_health(struct qlcnic_adapter *adapter)
u32 state = 0, heartbeat;
struct net_device *netdev = adapter->netdev;
if (qlcnic_check_temp(adapter))
if (adapter->need_fw_reset)
qlcnic_dev_request_reset(adapter);
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET) {
qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
} else if (state == QLCNIC_DEV_NEED_QUISCENT)
heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
if (adapter->reset_context && auto_fw_reset) {
qlcnic_reset_hw_context(adapter);
adapter->netdev->trans_start = jiffies;
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
qlcnic_dev_request_reset(adapter);
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
QLCNIC_DEV_NEED_RESET;
if (auto_fw_reset &&
!test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
QLCDB(adapter, DRV, "fw recovery scheduled.\n");
qlcnic_fw_poll_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
if (qlcnic_check_health(adapter))
if (adapter->fhash.fnum)
qlcnic_prune_lb_filters(adapter);
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
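/*
 * Used while recovering from an AER event: report whether the other PCI
 * function on this device is still powered up; this function counts as
 * the first one back (and restarts the firmware) only when its peer is
 * in D3cold.
 */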
static int qlcnic_is_first_func(struct pci_dev *pdev)
struct pci_dev *oth_pdev;
int val = pdev->devfn;
oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
(pdev->bus), pdev->bus->number,
PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
if (oth_pdev->current_state != PCI_D3cold) {
pci_dev_put(oth_pdev);
pci_dev_put(oth_pdev);
static int qlcnic_attach_func(struct pci_dev *pdev)
int err, first_func;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
pdev->error_state = pci_channel_io_normal;
err = pci_enable_device(pdev);
pci_set_power_state(pdev, PCI_D0);
pci_set_master(pdev);
pci_restore_state(pdev);
first_func = qlcnic_is_first_func(pdev);
if (qlcnic_api_lock(adapter))
if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_api_unlock(adapter);
err = adapter->nic_ops->start_firmware(adapter);
qlcnic_clr_drv_state(adapter);
qlcnic_setup_intr(adapter);
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
qlcnic_clr_all_drv_state(adapter, 1);
clear_bit(__QLCNIC_AER, &adapter->state);
netif_device_attach(netdev);
err = qlcnic_up(adapter, netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
netif_device_attach(netdev);
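/*
 * PCI AER error handlers: on a reported channel error the interface is
 * detached and the device disabled, slot reset re-runs
 * qlcnic_attach_func(), and io_resume restarts the firmware poll work
 * once the device is READY again.
 */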
static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (state == pci_channel_io_normal)
return PCI_ERS_RESULT_RECOVERED;
set_bit(__QLCNIC_AER, &adapter->state);
netif_device_detach(netdev);
cancel_delayed_work_sync(&adapter->fw_work);
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
pci_save_state(pdev);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
static void qlcnic_io_resume(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
test_and_clear_bit(__QLCNIC_AER, &adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
err = qlcnic_can_start_firmware(adapter);
err = qlcnic_check_npar_opertional(adapter);
err = qlcnic_initialize_nic(adapter);
qlcnic_check_options(adapter);
err = qlcnic_set_eswitch_port_config(adapter);
adapter->need_fw_reset = 0;
qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
qlcnic_store_bridged_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
if (strict_strtoul(buf, 2, &new))
if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
qlcnic_show_bridged_mode(struct device *dev,
struct device_attribute *attr, char *buf)
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int bridged_mode = 0;
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
return sprintf(buf, "%d\n", bridged_mode);
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_bridged_mode,
.store = qlcnic_store_bridged_mode,
qlcnic_store_diag_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (strict_strtoul(buf, 2, &new))
if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
adapter->flags ^= QLCNIC_DIAG_ENABLED;
qlcnic_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
!!(adapter->flags & QLCNIC_DIAG_ENABLED));
static struct device_attribute dev_attr_diag_mode = {
.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_diag_mode,
.store = qlcnic_store_diag_mode,
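/*
 * RSS ring count handling: the requested ring count must be a power of
 * two between 2 and the hardware limit, and applying a new value means
 * tearing down and re-creating the interrupt and ring setup.
 */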
int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
if (!use_msi_x && !use_msi) {
netdev_info(netdev, "no msix or msi support, hence no rss\n");
if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
netdev_info(netdev, "rss_ring valid range [2 - %x] in "
"powers of 2\n", max_hw);
int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
struct net_device *netdev = adapter->netdev;
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
if (qlcnic_enable_msix(adapter, data)) {
netdev_info(netdev, "failed setting max_rss; rss disabled\n");
qlcnic_enable_msi_legacy(adapter);
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
err = __qlcnic_up(adapter, netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
size_t crb_size = 4;
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
if (offset < QLCNIC_PCI_CRBSPACE) {
if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
QLCNIC_PCI_CAMQM_END))
if ((size != crb_size) || (offset & (crb_size-1)))
qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
memcpy(buf, &qmdata, size);
data = QLCRD32(adapter, offset);
memcpy(buf, &data, size);
qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
memcpy(&qmdata, buf, size);
qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
memcpy(&data, buf, size);
QLCWR32(adapter, offset, data);
qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
if ((size != 8) || (offset & 0x7))
qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
memcpy(buf, &data, size);
qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
memcpy(&data, buf, size);
if (qlcnic_pci_mem_write_2M(adapter, offset, data))
static struct bin_attribute bin_attr_crb = {
.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_read_crb,
.write = qlcnic_sysfs_write_crb,
static struct bin_attribute bin_attr_mem = {
.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_read_mem,
.write = qlcnic_sysfs_write_mem,
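/*
 * Sanity-check a port-mirroring configuration written through the
 * pm_config sysfs node: source and destination must both be valid NIC
 * functions and must sit on the same eswitch.
 */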
validate_pm_config(struct qlcnic_adapter *adapter,
struct qlcnic_pm_func_cfg *pm_cfg, int count)
u8 src_pci_func, s_esw_id, d_esw_id;
for (i = 0; i < count; i++) {
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
|| dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
s_esw_id = adapter->npars[src_pci_func].phy_port;
d_esw_id = adapter->npars[dest_pci_func].phy_port;
if (s_esw_id != d_esw_id)
return QL_STATUS_INVALID_PARAM;
qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
int count, rem, i, ret;
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
return QL_STATUS_INVALID_PARAM;
pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
ret = validate_pm_config(adapter, pm_cfg, count);
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
action = !!pm_cfg[i].action;
id = adapter->npars[pci_func].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
id = adapter->npars[pci_func].phy_port;
adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
adapter->npars[pci_func].dest_npar = id;
qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
if (size != sizeof(pm_cfg))
return QL_STATUS_INVALID_PARAM;
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
pm_cfg[i].action = adapter->npars[i].enable_pm;
pm_cfg[i].dest_npar = 0;
pm_cfg[i].pci_func = i;
memcpy(buf, &pm_cfg, size);
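/*
 * Validate an eswitch configuration buffer before applying it: every
 * entry must name a valid NIC function, VLAN operations need a valid
 * VLAN id and op_type, and port-default settings are restricted by the
 * function's privilege level.
 */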
validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
if (adapter->op_mode == QLCNIC_MGMT_FUNC)
if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].mac_override != 1)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].promisc_mode != 1)
return QL_STATUS_INVALID_PARAM;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
return QL_STATUS_INVALID_PARAM;
if (!esw_cfg[i].op_type)
return QL_STATUS_INVALID_PARAM;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
return QL_STATUS_INVALID_PARAM;
return QL_STATUS_INVALID_PARAM;
qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
int count, rem, i, ret;
u8 pci_func, op_mode = 0;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
return QL_STATUS_INVALID_PARAM;
esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
ret = validate_esw_config(adapter, esw_cfg, count);
for (i = 0; i < count; i++) {
if (adapter->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
op_mode = esw_cfg[i].op_mode;
qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
esw_cfg[i].op_mode = op_mode;
esw_cfg[i].pci_func = adapter->ahw->pci_func;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
case QLCNIC_ADD_VLAN:
qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
case QLCNIC_DEL_VLAN:
esw_cfg[i].vlan_id = 0;
qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
npar = &adapter->npars[pci_func];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
npar->promisc_mode = esw_cfg[i].promisc_mode;
npar->mac_override = esw_cfg[i].mac_override;
npar->offload_flags = esw_cfg[i].offload_flags;
npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
npar->discard_tagged = esw_cfg[i].discard_tagged;
case QLCNIC_ADD_VLAN:
npar->pvid = esw_cfg[i].vlan_id;
case QLCNIC_DEL_VLAN:
qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
if (size != sizeof(esw_cfg))
return QL_STATUS_INVALID_PARAM;
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
esw_cfg[i].pci_func = i;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
memcpy(buf, &esw_cfg, size);
validate_npar_config(struct qlcnic_adapter *adapter,
struct qlcnic_npar_func_cfg *np_cfg, int count)
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
return QL_STATUS_INVALID_PARAM;
qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
int i, count, rem, ret;
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
return QL_STATUS_INVALID_PARAM;
np_cfg = (struct qlcnic_npar_func_cfg *) buf;
ret = validate_npar_config(adapter, np_cfg, count);
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
nic_info.pci_func = pci_func;
nic_info.min_tx_bw = np_cfg[i].min_bw;
nic_info.max_tx_bw = np_cfg[i].max_bw;
ret = qlcnic_set_nic_info(adapter, &nic_info);
adapter->npars[i].min_bw = nic_info.min_tx_bw;
adapter->npars[i].max_bw = nic_info.max_tx_bw;
qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
if (size != sizeof(np_cfg))
return QL_STATUS_INVALID_PARAM;
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
np_cfg[i].fw_capab = nic_info.capabilities;
np_cfg[i].min_bw = nic_info.min_tx_bw;
np_cfg[i].max_bw = nic_info.max_tx_bw;
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
memcpy(buf, &np_cfg, size);
qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_statistics port_stats;
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
if (offset >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
memcpy(buf, &port_stats, size);
qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_statistics esw_stats;
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
memcpy(buf, &esw_stats, size);
qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_TX_COUNTER);
qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (offset >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_TX_COUNTER);
qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
struct qlcnic_pci_info *pci_info;
if (size != sizeof(pci_cfg))
return QL_STATUS_INVALID_PARAM;
pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
ret = qlcnic_get_pci_info(adapter, pci_info);
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].port_num = pci_info[i].default_port;
pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
memcpy(buf, &pci_cfg, size);
static struct bin_attribute bin_attr_npar_config = {
.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_read_npar_config,
.write = qlcnic_sysfs_write_npar_config,
static struct bin_attribute bin_attr_pci_config = {
.attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_read_pci_config,
static struct bin_attribute bin_attr_port_stats = {
.attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_get_port_stats,
.write = qlcnic_sysfs_clear_port_stats,
static struct bin_attribute bin_attr_esw_stats = {
.attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_get_esw_stats,
.write = qlcnic_sysfs_clear_esw_stats,
static struct bin_attribute bin_attr_esw_config = {
.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_read_esw_config,
.write = qlcnic_sysfs_write_esw_config,
static struct bin_attribute bin_attr_pm_config = {
.attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
.read = qlcnic_sysfs_read_pm_config,
.write = qlcnic_sysfs_write_pm_config,
qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
if (device_create_file(dev, &dev_attr_bridged_mode))
"failed to create bridged_mode sysfs entry\n");
qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
device_remove_file(dev, &dev_attr_bridged_mode);
qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
if (device_create_file(dev, &dev_attr_diag_mode))
dev_info(dev, "failed to create diag_mode sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_crb))
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
if (device_create_bin_file(dev, &bin_attr_esw_config))
dev_info(dev, "failed to create esw config sysfs entry");
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
if (device_create_bin_file(dev, &bin_attr_npar_config))
dev_info(dev, "failed to create npar config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_pm_config))
dev_info(dev, "failed to create pm config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_esw_stats))
dev_info(dev, "failed to create eswitch stats sysfs entry");
qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
device_remove_bin_file(dev, &bin_attr_port_stats);
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
device_remove_bin_file(dev, &bin_attr_pci_config);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
device_remove_bin_file(dev, &bin_attr_esw_config);
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
device_remove_bin_file(dev, &bin_attr_npar_config);
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
struct net_device *dev, unsigned long event)
struct in_device *indev;
indev = in_dev_get(dev);
qlcnic_config_ipaddr(adapter,
ifa->ifa_address, QLCNIC_IP_UP);
qlcnic_config_ipaddr(adapter,
ifa->ifa_address, QLCNIC_IP_DOWN);
} endfor_ifa(indev);
qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device *dev;
qlcnic_config_indev_addr(adapter, netdev, event);
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
dev = vlan_find_dev(netdev, vid);
qlcnic_config_indev_addr(adapter, dev, event);
static int qlcnic_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
struct qlcnic_adapter *adapter;
struct net_device *dev = (struct net_device *)ptr;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
dev = vlan_dev_real_dev(dev);
if (!is_qlcnic_netdev(dev))
adapter = netdev_priv(dev);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_config_indev_addr(adapter, dev, event);
qlcnic_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
struct qlcnic_adapter *adapter;
struct net_device *dev;
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
dev = vlan_dev_real_dev(dev);
if (!is_qlcnic_netdev(dev))
adapter = netdev_priv(dev);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
static struct notifier_block qlcnic_netdev_cb = {
.notifier_call = qlcnic_netdev_event,
static struct notifier_block qlcnic_inetaddr_cb = {
.notifier_call = qlcnic_inetaddr_event,
qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
static struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
static struct pci_driver qlcnic_driver = {
.name = qlcnic_driver_name,
.id_table = qlcnic_pci_tbl,
.probe = qlcnic_probe,
.remove = __devexit_p(qlcnic_remove),
.suspend = qlcnic_suspend,
.resume = qlcnic_resume,
.shutdown = qlcnic_shutdown,
.err_handler = &qlcnic_err_handler
static int __init qlcnic_init_module(void)
printk(KERN_INFO "%s\n", qlcnic_driver_string);
qlcnic_wq = create_singlethread_workqueue("qlcnic");
if (qlcnic_wq == NULL) {
printk(KERN_ERR "qlcnic: cannot create workqueue\n");
register_netdevice_notifier(&qlcnic_netdev_cb);
register_inetaddr_notifier(&qlcnic_inetaddr_cb);
ret = pci_register_driver(&qlcnic_driver);
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
destroy_workqueue(qlcnic_wq);
module_init(qlcnic_init_module);
static void __exit qlcnic_exit_module(void)
pci_unregister_driver(&qlcnic_driver);
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
destroy_workqueue(qlcnic_wq);
module_exit(qlcnic_exit_module);