2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
5 * See LICENSE.qlcnic for copyright and licensing details.
/*
 * qlcnic_poll_rsp - busy-poll the CDRP CRB register for a firmware response.
 *
 * Loops reading QLCNIC_CDRP_CRB_OFFSET until QLCNIC_CDRP_IS_RSP() is true,
 * returning QLCNIC_CDRP_RSP_TIMEOUT after QLCNIC_OS_CRB_RETRY_COUNT polls.
 * NOTE(review): several source lines are elided in this listing (return type,
 * local declarations, final return) — review against the full file.
 */
11 qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
17 /* give at least 1 ms for firmware to respond */
20 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
21 return QLCNIC_CDRP_RSP_TIMEOUT;
23 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
24 } while (!QLCNIC_CDRP_IS_RSP(rsp));
/*
 * qlcnic_issue_cmd - issue a CDRP command to the firmware mailbox.
 *
 * Serializes CRB access via qlcnic_api_lock(), writes the signature and the
 * three command arguments into the ARG CRB registers, kicks the command via
 * QLCNIC_CDRP_FORM_CMD(), then waits for completion with qlcnic_poll_rsp().
 *
 * Returns QLCNIC_RCODE_SUCCESS, QLCNIC_RCODE_TIMEOUT (lock or response
 * timeout), or the firmware failure code read back from ARG1 on RSP_FAIL.
 */
30 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
31 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
35 u32 rcode = QLCNIC_RCODE_SUCCESS;
36 struct pci_dev *pdev = adapter->pdev;
38 signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
40 /* Acquire semaphore before accessing CRB */
41 if (qlcnic_api_lock(adapter))
42 return QLCNIC_RCODE_TIMEOUT;
44 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
45 QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
46 QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
47 QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
48 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd))
50 rsp = qlcnic_poll_rsp(adapter);
52 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
53 dev_err(&pdev->dev, "card response timeout.\n");
54 rcode = QLCNIC_RCODE_TIMEOUT;
55 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
56 rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
57 dev_err(&pdev->dev, "failed card response code:0x%x\n",
61 /* Release semaphore */
62 qlcnic_api_unlock(adapter);
/*
 * qlcnic_fw_cmd_set_mtu - program a new MTU into firmware via CDRP.
 *
 * Only issued when the receive context is active; logs an error on failure.
 */
68 qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
70 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
72 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
73 if (qlcnic_issue_cmd(adapter,
74 adapter->ahw->pci_func,
75 adapter->fw_hal_version,
79 QLCNIC_CDRP_CMD_SET_MTU)) {
81 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
/*
 * qlcnic_fw_cmd_create_rx_ctx - create the firmware receive context.
 *
 * Builds a host request (hostrq) describing all RDS (receive descriptor)
 * and SDS (status descriptor) rings in DMA-coherent memory, issues
 * QLCNIC_CDRP_CMD_CREATE_RX_CTX, then reads back per-ring CRB offsets from
 * the card response (cardrsp) and converts them to mapped addresses.
 * All on-the-wire fields are little-endian (cpu_to_le*/le*_to_cpu).
 */
90 qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
93 struct qlcnic_hostrq_rx_ctx *prq;
94 struct qlcnic_cardrsp_rx_ctx *prsp;
95 struct qlcnic_hostrq_rds_ring *prq_rds;
96 struct qlcnic_hostrq_sds_ring *prq_sds;
97 struct qlcnic_cardrsp_rds_ring *prsp_rds;
98 struct qlcnic_cardrsp_sds_ring *prsp_sds;
99 struct qlcnic_host_rds_ring *rds_ring;
100 struct qlcnic_host_sds_ring *sds_ring;
102 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
105 u8 i, nrds_rings, nsds_rings;
106 size_t rq_size, rsp_size;
107 u32 cap, reg, val, reg2;
110 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
112 nrds_rings = adapter->max_rds_rings;
113 nsds_rings = adapter->max_sds_rings;
/* Request/response sizes scale with the number of rings described. */
116 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
119 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
122 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
123 &hostrq_phys_addr, GFP_KERNEL);
126 prq = (struct qlcnic_hostrq_rx_ctx *)addr;
128 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
129 &cardrsp_phys_addr, GFP_KERNEL);
134 prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
/* Tell firmware where to DMA its response. */
136 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
138 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
139 | QLCNIC_CAP0_VALIDOFF);
140 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
142 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
144 prq->txrx_sds_binding = nsds_rings - 1;
146 prq->capabilities[0] = cpu_to_le32(cap);
147 prq->host_int_crb_mode =
148 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
149 prq->host_rds_crb_mode =
150 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
152 prq->num_rds_rings = cpu_to_le16(nrds_rings);
153 prq->num_sds_rings = cpu_to_le16(nsds_rings);
154 prq->rds_ring_offset = 0;
/* SDS ring descriptors follow the RDS descriptors in prq->data. */
156 val = le32_to_cpu(prq->rds_ring_offset) +
157 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
158 prq->sds_ring_offset = cpu_to_le32(val);
160 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
161 le32_to_cpu(prq->rds_ring_offset));
/* Describe each host RDS ring (address, size, kind, buffer size). */
163 for (i = 0; i < nrds_rings; i++) {
165 rds_ring = &recv_ctx->rds_rings[i];
166 rds_ring->producer = 0;
168 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
169 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
170 prq_rds[i].ring_kind = cpu_to_le32(i);
171 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
174 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
175 le32_to_cpu(prq->sds_ring_offset));
/* Describe each host SDS ring; descriptors are zeroed before use. */
177 for (i = 0; i < nsds_rings; i++) {
179 sds_ring = &recv_ctx->sds_rings[i];
180 sds_ring->consumer = 0;
181 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
183 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
184 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
185 prq_sds[i].msi_index = cpu_to_le16(i);
/* Hand the request to firmware: DMA address split into hi/lo 32 bits. */
188 phys_addr = hostrq_phys_addr;
189 err = qlcnic_issue_cmd(adapter,
190 adapter->ahw->pci_func,
191 adapter->fw_hal_version,
192 (u32)(phys_addr >> 32),
193 (u32)(phys_addr & 0xffffffff),
195 QLCNIC_CDRP_CMD_CREATE_RX_CTX);
197 dev_err(&adapter->pdev->dev,
198 "Failed to create rx ctx in firmware%d\n", err);
/* Map the card-supplied per-ring CRB offsets to host addresses. */
203 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
204 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
206 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
207 rds_ring = &recv_ctx->rds_rings[i];
209 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
210 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
213 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
214 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
216 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
217 sds_ring = &recv_ctx->sds_rings[i];
219 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
220 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
222 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
223 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
/* Record context identity/state reported by firmware. */
226 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
227 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
228 recv_ctx->virt_port = prsp->virt_port;
/* Cleanup: both DMA buffers are freed on all paths. */
231 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
234 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
/*
 * qlcnic_fw_cmd_destroy_rx_ctx - tear down the firmware receive context.
 *
 * Issues QLCNIC_CDRP_CMD_DESTROY_RX_CTX for the recorded context_id and
 * marks the host-side context FREED regardless of the command outcome
 * (failure is only logged).
 */
239 qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
241 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
243 if (qlcnic_issue_cmd(adapter,
244 adapter->ahw->pci_func,
245 adapter->fw_hal_version,
246 recv_ctx->context_id,
247 QLCNIC_DESTROY_CTX_RESET,
249 QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
251 dev_err(&adapter->pdev->dev,
252 "Failed to destroy rx ctx in firmware\n");
255 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
/*
 * qlcnic_fw_cmd_create_tx_ctx - create the firmware transmit context.
 *
 * Resets host-side tx ring state, builds a DMA-coherent host request
 * describing the CDS (command descriptor) ring and the consumer-index
 * DMA address, issues QLCNIC_CDRP_CMD_CREATE_TX_CTX, and on success maps
 * the card's producer CRB offset and records the tx context id.
 */
259 qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
261 struct qlcnic_hostrq_tx_ctx *prq;
262 struct qlcnic_hostrq_cds_ring *prq_cds;
263 struct qlcnic_cardrsp_tx_ctx *prsp;
264 void *rq_addr, *rsp_addr;
265 size_t rq_size, rsp_size;
269 dma_addr_t rq_phys_addr, rsp_phys_addr;
270 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
272 /* reset host resources */
273 tx_ring->producer = 0;
274 tx_ring->sw_consumer = 0;
275 *(tx_ring->hw_consumer) = 0;
277 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
278 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
279 &rq_phys_addr, GFP_KERNEL);
283 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
284 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
285 &rsp_phys_addr, GFP_KERNEL);
291 memset(rq_addr, 0, rq_size);
292 prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
294 memset(rsp_addr, 0, rsp_size);
295 prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
/* Tell firmware where to DMA its response. */
297 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
299 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
301 prq->capabilities[0] = cpu_to_le32(temp);
303 prq->host_int_crb_mode =
304 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
306 prq->interrupt_ctl = 0;
/* Firmware DMAs the hw consumer index to this host address. */
308 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
310 prq_cds = &prq->cds_ring;
312 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
313 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
/* Issue the command: request DMA address split into hi/lo 32 bits. */
315 phys_addr = rq_phys_addr;
316 err = qlcnic_issue_cmd(adapter,
317 adapter->ahw->pci_func,
318 adapter->fw_hal_version,
319 (u32)(phys_addr >> 32),
320 ((u32)phys_addr & 0xffffffff),
322 QLCNIC_CDRP_CMD_CREATE_TX_CTX);
324 if (err == QLCNIC_RCODE_SUCCESS) {
325 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
326 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
328 adapter->tx_context_id =
329 le16_to_cpu(prsp->context_id);
331 dev_err(&adapter->pdev->dev,
332 "Failed to create tx ctx in firmware%d\n", err);
/* Cleanup: both DMA buffers are freed on all paths. */
336 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
340 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
/*
 * qlcnic_fw_cmd_destroy_tx_ctx - tear down the firmware transmit context.
 *
 * Issues QLCNIC_CDRP_CMD_DESTROY_TX_CTX for adapter->tx_context_id;
 * failure is only logged.
 */
346 qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
348 if (qlcnic_issue_cmd(adapter,
349 adapter->ahw->pci_func,
350 adapter->fw_hal_version,
351 adapter->tx_context_id,
352 QLCNIC_DESTROY_CTX_RESET,
354 QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
356 dev_err(&adapter->pdev->dev,
357 "Failed to destroy tx ctx in firmware\n");
/*
 * qlcnic_fw_cmd_query_phy - read a PHY register through firmware.
 *
 * On success the value is read back from the ARG1 CRB register.
 */
362 qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
365 if (qlcnic_issue_cmd(adapter,
366 adapter->ahw->pci_func,
367 adapter->fw_hal_version,
371 QLCNIC_CDRP_CMD_READ_PHY)) {
376 return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
/*
 * qlcnic_fw_cmd_set_phy - write a PHY register through firmware.
 *
 * Thin wrapper around qlcnic_issue_cmd() with QLCNIC_CDRP_CMD_WRITE_PHY;
 * returns the command's result code directly.
 */
380 qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
382 return qlcnic_issue_cmd(adapter,
383 adapter->ahw->pci_func,
384 adapter->fw_hal_version,
388 QLCNIC_CDRP_CMD_WRITE_PHY);
/*
 * qlcnic_alloc_hw_resources - allocate all DMA-coherent descriptor memory.
 *
 * Allocates the tx hw-consumer word, the tx descriptor ring, and one
 * descriptor ring per RDS and SDS ring. On any failure the error path
 * calls qlcnic_free_hw_resources(), which tolerates partially-allocated
 * state (NULL checks per ring).
 */
391 int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
396 struct qlcnic_recv_context *recv_ctx;
397 struct qlcnic_host_rds_ring *rds_ring;
398 struct qlcnic_host_sds_ring *sds_ring;
399 struct qlcnic_host_tx_ring *tx_ring;
401 struct pci_dev *pdev = adapter->pdev;
403 recv_ctx = adapter->recv_ctx;
404 tx_ring = adapter->tx_ring;
/* Single __le32 the firmware DMAs the tx consumer index into. */
406 tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
407 sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
408 if (tx_ring->hw_consumer == NULL) {
409 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
414 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
415 &tx_ring->phys_addr, GFP_KERNEL);
418 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
423 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
425 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
426 rds_ring = &recv_ctx->rds_rings[ring];
427 addr = dma_alloc_coherent(&adapter->pdev->dev,
428 RCV_DESC_RINGSIZE(rds_ring),
429 &rds_ring->phys_addr, GFP_KERNEL);
432 "failed to allocate rds ring [%d]\n", ring);
436 rds_ring->desc_head = (struct rcv_desc *)addr;
440 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
441 sds_ring = &recv_ctx->sds_rings[ring];
443 addr = dma_alloc_coherent(&adapter->pdev->dev,
444 STATUS_DESC_RINGSIZE(sds_ring),
445 &sds_ring->phys_addr, GFP_KERNEL);
448 "failed to allocate sds ring [%d]\n", ring);
452 sds_ring->desc_head = (struct status_desc *)addr;
/* Error path: release whatever was allocated so far. */
458 qlcnic_free_hw_resources(adapter);
/*
 * qlcnic_fw_create_ctx - create rx then tx firmware contexts.
 *
 * Performs a PCI function-level reset first if QLCNIC_NEED_FLR is set.
 * If tx context creation fails, the freshly-created rx context is
 * destroyed so no half-initialized state is left behind. On success the
 * __QLCNIC_FW_ATTACHED state bit is set.
 */
463 int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
467 if (adapter->flags & QLCNIC_NEED_FLR) {
468 pci_reset_function(adapter->pdev);
469 adapter->flags &= ~QLCNIC_NEED_FLR;
472 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
476 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
478 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
482 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
/*
 * qlcnic_fw_destroy_ctx - destroy rx and tx firmware contexts.
 *
 * Only acts if __QLCNIC_FW_ATTACHED was set (test-and-clear makes the
 * teardown idempotent).
 */
486 void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
488 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
489 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
490 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
492 /* Allow dma queues to drain after context reset */
/*
 * qlcnic_free_hw_resources - free all DMA-coherent descriptor memory.
 *
 * Counterpart of qlcnic_alloc_hw_resources(). Each pointer is checked
 * against NULL and reset to NULL after freeing, so this is safe to call
 * on partially-allocated state and safe to call twice.
 */
497 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
499 struct qlcnic_recv_context *recv_ctx;
500 struct qlcnic_host_rds_ring *rds_ring;
501 struct qlcnic_host_sds_ring *sds_ring;
502 struct qlcnic_host_tx_ring *tx_ring;
505 recv_ctx = adapter->recv_ctx;
507 tx_ring = adapter->tx_ring;
508 if (tx_ring->hw_consumer != NULL) {
509 dma_free_coherent(&adapter->pdev->dev,
511 tx_ring->hw_consumer,
512 tx_ring->hw_cons_phys_addr);
513 tx_ring->hw_consumer = NULL;
516 if (tx_ring->desc_head != NULL) {
517 dma_free_coherent(&adapter->pdev->dev,
518 TX_DESC_RINGSIZE(tx_ring),
519 tx_ring->desc_head, tx_ring->phys_addr);
520 tx_ring->desc_head = NULL;
523 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
524 rds_ring = &recv_ctx->rds_rings[ring];
526 if (rds_ring->desc_head != NULL) {
527 dma_free_coherent(&adapter->pdev->dev,
528 RCV_DESC_RINGSIZE(rds_ring),
530 rds_ring->phys_addr);
531 rds_ring->desc_head = NULL;
535 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
536 sds_ring = &recv_ctx->sds_rings[ring];
538 if (sds_ring->desc_head != NULL) {
539 dma_free_coherent(&adapter->pdev->dev,
540 STATUS_DESC_RINGSIZE(sds_ring),
542 sds_ring->phys_addr);
543 sds_ring->desc_head = NULL;
549 /* Get MAC address of a NIC partition */
/*
 * qlcnic_get_mac_address - query firmware for this function's MAC.
 *
 * arg1 = pci_func with BIT_8 set (request flag); on success the address
 * is assembled from the ARG1/ARG2 CRB registers via qlcnic_fetch_mac().
 */
550 int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
555 arg1 = adapter->ahw->pci_func | BIT_8;
556 err = qlcnic_issue_cmd(adapter,
557 adapter->ahw->pci_func,
558 adapter->fw_hal_version,
562 QLCNIC_CDRP_CMD_MAC_ADDRESS);
564 if (err == QLCNIC_RCODE_SUCCESS)
565 qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
566 QLCNIC_ARG2_CRB_OFFSET, 0, mac);
568 dev_err(&adapter->pdev->dev,
569 "Failed to get mac address%d\n", err);
576 /* Get info of a NIC partition */
/*
 * qlcnic_get_nic_info - fetch a partition's configuration from firmware.
 *
 * Firmware DMAs a little-endian struct qlcnic_info into a coherent
 * buffer; the fields are byte-swapped into the caller's npar_info.
 * The target function and buffer size are packed as (func_id << 16 | size).
 */
577 int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
578 struct qlcnic_info *npar_info, u8 func_id)
581 dma_addr_t nic_dma_t;
582 struct qlcnic_info *nic_info;
584 size_t nic_size = sizeof(struct qlcnic_info);
586 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
587 &nic_dma_t, GFP_KERNEL);
590 memset(nic_info_addr, 0, nic_size);
592 nic_info = (struct qlcnic_info *) nic_info_addr;
593 err = qlcnic_issue_cmd(adapter,
594 adapter->ahw->pci_func,
595 adapter->fw_hal_version,
598 (func_id << 16 | nic_size),
599 QLCNIC_CDRP_CMD_GET_NIC_INFO);
601 if (err == QLCNIC_RCODE_SUCCESS) {
602 npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
603 npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
604 npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
605 npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
606 npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
607 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
608 npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
609 npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
610 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
611 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
613 dev_info(&adapter->pdev->dev,
614 "phy port: %d switch_mode: %d,\n"
615 "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
616 "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
617 npar_info->phys_port, npar_info->switch_mode,
618 npar_info->max_tx_ques, npar_info->max_rx_ques,
619 npar_info->min_tx_bw, npar_info->max_tx_bw,
620 npar_info->max_mtu, npar_info->capabilities);
622 dev_err(&adapter->pdev->dev,
623 "Failed to get nic info%d\n", err);
627 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
632 /* Configure a NIC partition */
/*
 * qlcnic_set_nic_info - push a partition configuration to firmware.
 *
 * Restricted to the management function (QLCNIC_MGMT_FUNC). The caller's
 * struct qlcnic_info is converted to little-endian in a coherent buffer
 * and handed to firmware; the target function and size are packed as
 * (pci_func << 16 | size).
 */
633 int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
636 dma_addr_t nic_dma_t;
638 struct qlcnic_info *nic_info;
639 size_t nic_size = sizeof(struct qlcnic_info);
641 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
644 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
645 &nic_dma_t, GFP_KERNEL);
649 memset(nic_info_addr, 0, nic_size);
650 nic_info = (struct qlcnic_info *)nic_info_addr;
652 nic_info->pci_func = cpu_to_le16(nic->pci_func);
653 nic_info->op_mode = cpu_to_le16(nic->op_mode);
654 nic_info->phys_port = cpu_to_le16(nic->phys_port);
655 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
656 nic_info->capabilities = cpu_to_le32(nic->capabilities);
657 nic_info->max_mac_filters = nic->max_mac_filters;
658 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
659 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
660 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
661 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
663 err = qlcnic_issue_cmd(adapter,
664 adapter->ahw->pci_func,
665 adapter->fw_hal_version,
668 ((nic->pci_func << 16) | nic_size),
669 QLCNIC_CDRP_CMD_SET_NIC_INFO);
671 if (err != QLCNIC_RCODE_SUCCESS) {
672 dev_err(&adapter->pdev->dev,
673 "Failed to set nic info%d\n", err);
677 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
682 /* Get PCI Info of a partition */
/*
 * qlcnic_get_pci_info - fetch per-function PCI info for all functions.
 *
 * Firmware fills an array of QLCNIC_MAX_PCI_FUNC little-endian
 * struct qlcnic_pci_info entries into a coherent buffer; each entry is
 * byte-swapped into the caller's pci_info array.
 */
683 int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
684 struct qlcnic_pci_info *pci_info)
687 dma_addr_t pci_info_dma_t;
688 struct qlcnic_pci_info *npar;
690 size_t npar_size = sizeof(struct qlcnic_pci_info);
691 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
693 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
694 &pci_info_dma_t, GFP_KERNEL);
697 memset(pci_info_addr, 0, pci_size);
699 npar = (struct qlcnic_pci_info *) pci_info_addr;
700 err = qlcnic_issue_cmd(adapter,
701 adapter->ahw->pci_func,
702 adapter->fw_hal_version,
706 QLCNIC_CDRP_CMD_GET_PCI_INFO);
708 if (err == QLCNIC_RCODE_SUCCESS) {
709 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
710 pci_info->id = le16_to_cpu(npar->id);
711 pci_info->active = le16_to_cpu(npar->active);
712 pci_info->type = le16_to_cpu(npar->type);
713 pci_info->default_port =
714 le16_to_cpu(npar->default_port);
715 pci_info->tx_min_bw =
716 le16_to_cpu(npar->tx_min_bw);
717 pci_info->tx_max_bw =
718 le16_to_cpu(npar->tx_max_bw);
719 memcpy(pci_info->mac, npar->mac, ETH_ALEN);
722 dev_err(&adapter->pdev->dev,
723 "Failed to get PCI Info%d\n", err);
727 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
732 /* Configure eSwitch for port mirroring */
/*
 * qlcnic_config_port_mirroring - enable/disable mirroring on an eSwitch.
 *
 * Management function only, and only for an enabled eSwitch. arg1 packs
 * the eswitch id, BIT_4 as the enable flag, and pci_func in bits 8-15.
 */
733 int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
734 u8 enable_mirroring, u8 pci_func)
739 if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
740 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
743 arg1 = id | (enable_mirroring ? BIT_4 : 0);
744 arg1 |= pci_func << 8;
746 err = qlcnic_issue_cmd(adapter,
747 adapter->ahw->pci_func,
748 adapter->fw_hal_version,
752 QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
754 if (err != QLCNIC_RCODE_SUCCESS) {
755 dev_err(&adapter->pdev->dev,
756 "Failed to configure port mirroring%d on eswitch:%d\n",
759 dev_info(&adapter->pdev->dev,
760 "Configured eSwitch %d for port mirroring:%d\n",
/*
 * qlcnic_get_port_stats - query eSwitch statistics for one port/function.
 *
 * Non-management functions may only query their own pci_func. Firmware
 * DMAs a little-endian __qlcnic_esw_statistics into a coherent buffer,
 * which is byte-swapped into the caller's esw_stats. arg1 packs:
 * func | version<<8 | QLCNIC_STATS_PORT<<12 | rx_tx<<15 | size<<16.
 */
767 int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
768 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
770 size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
771 struct __qlcnic_esw_statistics *stats;
772 dma_addr_t stats_dma_t;
777 if (esw_stats == NULL)
780 if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
781 func != adapter->ahw->pci_func) {
782 dev_err(&adapter->pdev->dev,
783 "Not privilege to query stats for func=%d", func);
787 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
788 &stats_dma_t, GFP_KERNEL);
790 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
793 memset(stats_addr, 0, stats_size);
795 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
796 arg1 |= rx_tx << 15 | stats_size << 16;
798 err = qlcnic_issue_cmd(adapter,
799 adapter->ahw->pci_func,
800 adapter->fw_hal_version,
804 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
807 stats = (struct __qlcnic_esw_statistics *)stats_addr;
808 esw_stats->context_id = le16_to_cpu(stats->context_id);
809 esw_stats->version = le16_to_cpu(stats->version);
810 esw_stats->size = le16_to_cpu(stats->size);
811 esw_stats->multicast_frames =
812 le64_to_cpu(stats->multicast_frames);
813 esw_stats->broadcast_frames =
814 le64_to_cpu(stats->broadcast_frames);
815 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
816 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
817 esw_stats->local_frames = le64_to_cpu(stats->local_frames);
818 esw_stats->errors = le64_to_cpu(stats->errors);
819 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
822 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
/*
 * qlcnic_get_eswitch_stats - aggregate per-port stats for one eSwitch.
 *
 * Management function only. Iterates every PCI function whose npar
 * phy_port matches the requested eswitch, fetches its port stats, and
 * accumulates them with QLCNIC_ADD_ESW_STATS (counters start at
 * QLCNIC_ESW_STATS_NOT_AVAIL).
 */
827 int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
828 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
830 struct __qlcnic_esw_statistics port_stats;
834 if (esw_stats == NULL)
836 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
838 if (adapter->npars == NULL)
/*
 * NOTE(review): this zeroes only sizeof(u64) (8) bytes of *esw_stats,
 * not the whole struct. Every counter field is assigned explicitly just
 * below, so this may be intentional, but sizeof(*esw_stats) looks like
 * the intended size — verify against the struct layout.
 */
841 memset(esw_stats, 0, sizeof(u64));
842 esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
843 esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
844 esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
845 esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
846 esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
847 esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
848 esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
849 esw_stats->context_id = eswitch;
851 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
852 if (adapter->npars[i].phy_port != eswitch)
855 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
856 if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
859 esw_stats->size = port_stats.size;
860 esw_stats->version = port_stats.version;
861 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
862 port_stats.unicast_frames);
863 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
864 port_stats.multicast_frames);
865 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
866 port_stats.broadcast_frames);
867 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
868 port_stats.dropped_frames);
869 QLCNIC_ADD_ESW_STATS(esw_stats->errors,
871 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
872 port_stats.local_frames);
873 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
874 port_stats.numbytes);
/*
 * qlcnic_clear_esw_stats - clear port or eSwitch statistics counters.
 *
 * Management function only. Validates the port index against the limit
 * for the selected scope (per-function for QLCNIC_STATS_PORT, per-XG-port
 * for QLCNIC_STATS_ESWITCH). The clear is requested via the GET command
 * with BIT_14 set in arg1.
 */
880 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
881 const u8 port, const u8 rx_tx)
886 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
889 if (func_esw == QLCNIC_STATS_PORT) {
890 if (port >= QLCNIC_MAX_PCI_FUNC)
892 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
893 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
899 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
902 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
903 arg1 |= BIT_14 | rx_tx << 15;
905 return qlcnic_issue_cmd(adapter,
906 adapter->ahw->pci_func,
907 adapter->fw_hal_version,
911 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
/*
 * NOTE(review): the split format string below concatenates to
 * "...port=%drx_ctx=%d\n" — there is no space before "rx_ctx".
 * Consider adding one (message text change, not done here).
 */
914 dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
915 "rx_ctx=%d\n", func_esw, port, rx_tx);
/*
 * __qlcnic_get_eswitch_port_config - fetch raw eSwitch port config words.
 *
 * *arg1 carries the target pci_func in bits 8-15 on entry; on success
 * both *arg1 and *arg2 are overwritten with the config words read back
 * from the ARG1/ARG2 CRB registers.
 */
920 __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
921 u32 *arg1, u32 *arg2)
925 pci_func = (*arg1 >> 8);
926 err = qlcnic_issue_cmd(adapter,
927 adapter->ahw->pci_func,
928 adapter->fw_hal_version,
932 QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
934 if (err == QLCNIC_RCODE_SUCCESS) {
935 *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
936 *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
937 dev_info(&adapter->pdev->dev,
938 "eSwitch port config for pci func %d\n", pci_func);
940 dev_err(&adapter->pdev->dev,
941 "Failed to get eswitch port config for pci func %d\n",
946 /* Configure eSwitch port
947 op_mode = 0 for setting default port behavior
948 op_mode = 1 for setting vlan id
949 op_mode = 2 for deleting vlan id
950 op_type = 0 for vlan_id
951 op_type = 1 for port vlan_id
/*
 * qlcnic_config_switch_port - read-modify-write an eSwitch port config.
 *
 * Management function only. Fetches the current config words via
 * __qlcnic_get_eswitch_port_config(), then edits arg1/arg2 bit fields
 * according to esw_cfg->op_mode before issuing CONFIGURE_ESWITCH:
 *   QLCNIC_PORT_DEFAULTS - set/clear behavior flags from esw_cfg fields
 *   QLCNIC_ADD_VLAN      - BIT_2|BIT_5 plus vlan_id in bits 16-31
 *   QLCNIC_DEL_VLAN      - BIT_3|BIT_5 and clear the vlan_id field
 */
953 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
954 struct qlcnic_esw_func_cfg *esw_cfg)
960 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
962 pci_func = esw_cfg->pci_func;
963 arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
964 arg1 |= (pci_func << 8);
966 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
/* Re-insert pci_func and clear the add/del-vlan mode bits. */
968 arg1 &= ~(0x0ff << 8);
969 arg1 |= (pci_func << 8);
970 arg1 &= ~(BIT_2 | BIT_3);
971 switch (esw_cfg->op_mode) {
972 case QLCNIC_PORT_DEFAULTS:
973 arg1 |= (BIT_4 | BIT_6 | BIT_7);
974 arg2 |= (BIT_0 | BIT_1);
975 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
976 arg2 |= (BIT_2 | BIT_3);
977 if (!(esw_cfg->discard_tagged))
979 if (!(esw_cfg->promisc_mode))
981 if (!(esw_cfg->mac_override))
983 if (!(esw_cfg->mac_anti_spoof))
985 if (!(esw_cfg->offload_flags & BIT_0))
986 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
987 if (!(esw_cfg->offload_flags & BIT_1))
989 if (!(esw_cfg->offload_flags & BIT_2))
992 case QLCNIC_ADD_VLAN:
993 arg1 |= (BIT_2 | BIT_5);
994 arg1 |= (esw_cfg->vlan_id << 16);
996 case QLCNIC_DEL_VLAN:
997 arg1 |= (BIT_3 | BIT_5);
998 arg1 &= ~(0x0ffff << 16);
1004 err = qlcnic_issue_cmd(adapter,
1005 adapter->ahw->pci_func,
1006 adapter->fw_hal_version,
1010 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
1012 if (err != QLCNIC_RCODE_SUCCESS) {
1013 dev_err(&adapter->pdev->dev,
1014 "Failed to configure eswitch pci func %d\n", pci_func);
1016 dev_info(&adapter->pdev->dev,
1017 "Configured eSwitch for pci func %d\n", pci_func);
1024 qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1025 struct qlcnic_esw_func_cfg *esw_cfg)
1029 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
1030 phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
1032 phy_port = adapter->physical_port;
1034 arg1 |= (esw_cfg->pci_func << 8);
1035 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1038 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1039 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1040 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1041 esw_cfg->mac_override = !!(arg1 & BIT_7);
1042 esw_cfg->vlan_id = LSW(arg1 >> 16);
1043 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1044 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);