2 * Copyright (C) 2005 - 2013 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static struct be_cmd_priv_map cmd_priv_map[] = {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
30 OPCODE_COMMON_GET_FLOW_CONTROL,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
36 OPCODE_COMMON_SET_FLOW_CONTROL,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
42 OPCODE_ETH_GET_PPORT_STATS,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
48 OPCODE_COMMON_GET_PHY_DETAILS,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
59 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
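/* Callers typically gate privileged commands with this check before grabbing
 * a WRB; a minimal sketch of the usual calling pattern (the exact error code
 * used by callers in this file is elided above and is presumably -EPERM):
 *
 *	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
 *			    CMD_SUBSYSTEM_COMMON))
 *		return -EPERM;
 *
 * Opcodes with no entry in cmd_priv_map are allowed for every function.
 */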
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
73 return wrb->payload.embedded_payload;
76 static void be_mcc_notify(struct be_adapter *adapter)
78 struct be_queue_info *mccq = &adapter->mcc_obj.q;
81 if (be_error(adapter))
84 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
88 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
91 /* To check if valid bit is set, check the entire word as we don't know
92 * the endianness of the data (old entry is host endian while a new entry is little endian) */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
98 if (compl->flags != 0) {
99 flags = le32_to_cpu(compl->flags);
100 if (flags & CQE_FLAGS_VALID_MASK) {
101 compl->flags = flags;
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
119 addr = ((addr << 16) << 16) | tag0;
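/* The 64-bit virtual address of the command's response header is stashed in
 * the WRB as two 32-bit tags (see fill_wrb_tags() below): tag0 holds the low
 * word and tag1 the high word.  The double 16-bit shift above is simply
 * (tag1 << 32), written this way so it stays well-defined even when 'ulong'
 * is 32 bits wide, in which case tag1 is expected to be zero.
 */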
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 struct be_mcc_compl *compl)
126 u16 compl_status, extd_status;
127 struct be_cmd_resp_hdr *resp_hdr;
128 u8 opcode = 0, subsystem = 0;
130 /* Just swap the status to host endian; mcc tag is opaquely copied from mcc-completion */
132 be_dws_le_to_cpu(compl, 4);
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 CQE_STATUS_COMPL_MASK;
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
140 opcode = resp_hdr->opcode;
141 subsystem = resp_hdr->subsystem;
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl);
151 if (compl_status == MCC_STATUS_SUCCESS) {
152 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
153 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
154 (subsystem == CMD_SUBSYSTEM_ETH)) {
155 be_parse_stats(adapter);
156 adapter->stats_cmd_sent = false;
158 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
159 subsystem == CMD_SUBSYSTEM_COMMON) {
160 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
162 adapter->drv_stats.be_on_die_temperature =
163 resp->on_die_temperature;
166 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
167 adapter->be_get_temp_freq = 0;
169 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
170 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
173 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
174 dev_warn(&adapter->pdev->dev,
175 "VF is not privileged to issue opcode %d-%d\n",
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK;
180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status);
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
192 /* Link state evt is a string of bytes; no need for endian swapping */
193 static void be_async_link_state_process(struct be_adapter *adapter,
194 struct be_async_event_link_state *evt)
196 /* When link status changes, link speed must be re-queried from FW */
197 adapter->phy.link_speed = -1;
199 /* Ignore physical link event */
200 if (lancer_chip(adapter) &&
201 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
204 /* For the initial link status do not rely on the ASYNC event as
205 * it may not be received in some cases.
207 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
208 be_link_status_update(adapter, evt->port_link_status);
211 /* Grp5 CoS Priority evt */
212 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
213 struct be_async_event_grp5_cos_priority *evt)
216 adapter->vlan_prio_bmap = evt->available_priority_bmap;
217 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
218 adapter->recommended_prio =
219 evt->reco_default_priority << VLAN_PRIO_SHIFT;
223 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
224 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
225 struct be_async_event_grp5_qos_link_speed *evt)
227 if (adapter->phy.link_speed >= 0 &&
228 evt->physical_port == adapter->port_num)
229 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
233 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
234 struct be_async_event_grp5_pvid_state *evt)
237 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
242 static void be_async_grp5_evt_process(struct be_adapter *adapter,
243 u32 trailer, struct be_mcc_compl *evt)
247 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
248 ASYNC_TRAILER_EVENT_TYPE_MASK;
250 switch (event_type) {
251 case ASYNC_EVENT_COS_PRIORITY:
252 be_async_grp5_cos_priority_process(adapter,
253 (struct be_async_event_grp5_cos_priority *)evt);
255 case ASYNC_EVENT_QOS_SPEED:
256 be_async_grp5_qos_speed_process(adapter,
257 (struct be_async_event_grp5_qos_link_speed *)evt);
259 case ASYNC_EVENT_PVID_STATE:
260 be_async_grp5_pvid_state_process(adapter,
261 (struct be_async_event_grp5_pvid_state *)evt);
264 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
270 static void be_async_dbg_evt_process(struct be_adapter *adapter,
271 u32 trailer, struct be_mcc_compl *cmp)
274 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
276 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
277 ASYNC_TRAILER_EVENT_TYPE_MASK;
279 switch (event_type) {
280 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
282 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
283 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
286 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
292 static inline bool is_link_state_evt(u32 trailer)
294 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
295 ASYNC_TRAILER_EVENT_CODE_MASK) ==
296 ASYNC_EVENT_CODE_LINK_STATE;
299 static inline bool is_grp5_evt(u32 trailer)
301 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
302 ASYNC_TRAILER_EVENT_CODE_MASK) ==
303 ASYNC_EVENT_CODE_GRP_5);
306 static inline bool is_dbg_evt(u32 trailer)
308 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
309 ASYNC_TRAILER_EVENT_CODE_MASK) ==
310 ASYNC_EVENT_CODE_QNQ);
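/* Async completions carry a trailer word whose fields identify the event: an
 * event code selects the family (link state, group 5, QNQ debug) and an event
 * type selects the specific event within that family.  The
 * ASYNC_TRAILER_*_SHIFT/MASK constants used above and in the grp5/dbg handlers
 * come from be_cmds.h; only the code field is examined here, while the type
 * field is decoded by the per-family handlers.
 */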
313 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
315 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
316 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
318 if (be_mcc_compl_is_new(compl)) {
319 queue_tail_inc(mcc_cq);
325 void be_async_mcc_enable(struct be_adapter *adapter)
327 spin_lock_bh(&adapter->mcc_cq_lock);
329 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
330 adapter->mcc_obj.rearm_cq = true;
332 spin_unlock_bh(&adapter->mcc_cq_lock);
335 void be_async_mcc_disable(struct be_adapter *adapter)
337 spin_lock_bh(&adapter->mcc_cq_lock);
339 adapter->mcc_obj.rearm_cq = false;
340 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
342 spin_unlock_bh(&adapter->mcc_cq_lock);
345 int be_process_mcc(struct be_adapter *adapter)
347 struct be_mcc_compl *compl;
348 int num = 0, status = 0;
349 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
351 spin_lock(&adapter->mcc_cq_lock);
352 while ((compl = be_mcc_compl_get(adapter))) {
353 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
354 /* Interpret flags as an async trailer */
355 if (is_link_state_evt(compl->flags))
356 be_async_link_state_process(adapter,
357 (struct be_async_event_link_state *) compl);
358 else if (is_grp5_evt(compl->flags))
359 be_async_grp5_evt_process(adapter,
360 compl->flags, compl);
361 else if (is_dbg_evt(compl->flags))
362 be_async_dbg_evt_process(adapter,
363 compl->flags, compl);
364 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
365 status = be_mcc_compl_process(adapter, compl);
366 atomic_dec(&mcc_obj->q.used);
368 be_mcc_compl_use(compl);
373 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
375 spin_unlock(&adapter->mcc_cq_lock);
379 /* Wait till no more pending mcc requests are present */
380 static int be_mcc_wait_compl(struct be_adapter *adapter)
382 #define mcc_timeout 120000 /* 12s timeout */
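/* 120000 polls, each followed by a short delay of roughly 100us (the delay
 * call is not shown in this excerpt), gives the 12s budget noted above; this
 * is an assumption based on the stated timeout, not on the elided loop body.
 */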
384 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
386 for (i = 0; i < mcc_timeout; i++) {
387 if (be_error(adapter))
391 status = be_process_mcc(adapter);
394 if (atomic_read(&mcc_obj->q.used) == 0)
398 if (i == mcc_timeout) {
399 dev_err(&adapter->pdev->dev, "FW not responding\n");
400 adapter->fw_timeout = true;
406 /* Notify MCC requests and wait for completion */
407 static int be_mcc_notify_wait(struct be_adapter *adapter)
410 struct be_mcc_wrb *wrb;
411 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
412 u16 index = mcc_obj->q.head;
413 struct be_cmd_resp_hdr *resp;
415 index_dec(&index, mcc_obj->q.len);
416 wrb = queue_index_node(&mcc_obj->q, index);
418 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
420 be_mcc_notify(adapter);
422 status = be_mcc_wait_compl(adapter);
426 status = resp->status;
431 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
437 if (be_error(adapter))
440 ready = ioread32(db);
441 if (ready == 0xffffffff)
444 ready &= MPU_MAILBOX_DB_RDY_MASK;
449 dev_err(&adapter->pdev->dev, "FW not responding\n");
450 adapter->fw_timeout = true;
451 be_detect_error(adapter);
463 * Insert the mailbox address into the doorbell in two steps
464 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
466 static int be_mbox_notify_wait(struct be_adapter *adapter)
470 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
471 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
472 struct be_mcc_mailbox *mbox = mbox_mem->va;
473 struct be_mcc_compl *compl = &mbox->compl;
475 /* wait for ready to be set */
476 status = be_mbox_db_ready_wait(adapter, db);
480 val |= MPU_MAILBOX_DB_HI_MASK;
481 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
482 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
485 /* wait for ready to be set */
486 status = be_mbox_db_ready_wait(adapter, db);
491 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
492 val |= (u32)(mbox_mem->dma >> 4) << 2;
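/* The mailbox is 16-byte aligned, so bits 0-3 of its bus address are zero and
 * the full 64-bit address fits in two doorbell writes: the HI write above
 * carries address bits 34-63 in doorbell bits 2-31, and this LO write carries
 * address bits 4-33 in the same doorbell bits.  Firmware reassembles the
 * address and, once the command completes, writes a completion entry into the
 * same mailbox memory (checked below).
 */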
495 status = be_mbox_db_ready_wait(adapter, db);
499 /* A cq entry has been made now */
500 if (be_mcc_compl_is_new(compl)) {
501 status = be_mcc_compl_process(adapter, &mbox->compl);
502 be_mcc_compl_use(compl);
506 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
512 static u16 be_POST_stage_get(struct be_adapter *adapter)
516 if (BEx_chip(adapter))
517 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
519 pci_read_config_dword(adapter->pdev,
520 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
522 return sem & POST_STAGE_MASK;
525 int lancer_wait_ready(struct be_adapter *adapter)
527 #define SLIPORT_READY_TIMEOUT 30
531 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
532 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
533 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
539 if (i == SLIPORT_READY_TIMEOUT)
545 static bool lancer_provisioning_error(struct be_adapter *adapter)
547 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
548 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
549 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
550 sliport_err1 = ioread32(adapter->db +
551 SLIPORT_ERROR1_OFFSET);
552 sliport_err2 = ioread32(adapter->db +
553 SLIPORT_ERROR2_OFFSET);
555 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
556 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
562 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
565 u32 sliport_status, err, reset_needed;
568 resource_error = lancer_provisioning_error(adapter);
572 status = lancer_wait_ready(adapter);
574 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
575 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
576 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
577 if (err && reset_needed) {
578 iowrite32(SLI_PORT_CONTROL_IP_MASK,
579 adapter->db + SLIPORT_CONTROL_OFFSET);
581 /* check adapter has corrected the error */
582 status = lancer_wait_ready(adapter);
583 sliport_status = ioread32(adapter->db +
584 SLIPORT_STATUS_OFFSET);
585 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
586 SLIPORT_STATUS_RN_MASK);
587 if (status || sliport_status)
589 } else if (err || reset_needed) {
593 /* Stop error recovery if error is not recoverable.
594 * A no-resource error is temporary and will go away
595 * when the PF provisions resources.
597 resource_error = lancer_provisioning_error(adapter);
604 int be_fw_wait_ready(struct be_adapter *adapter)
607 int status, timeout = 0;
608 struct device *dev = &adapter->pdev->dev;
610 if (lancer_chip(adapter)) {
611 status = lancer_wait_ready(adapter);
616 stage = be_POST_stage_get(adapter);
617 if (stage == POST_STAGE_ARMFW_RDY)
620 dev_info(dev, "Waiting for POST, %ds elapsed\n",
622 if (msleep_interruptible(2000)) {
623 dev_err(dev, "Waiting for POST aborted\n");
627 } while (timeout < 60);
629 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
634 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
636 return &wrb->payload.sgl[0];
639 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
642 wrb->tag0 = addr & 0xFFFFFFFF;
643 wrb->tag1 = upper_32_bits(addr);
646 /* Don't touch the hdr after it's prepared */
647 /* mem will be NULL for embedded commands */
648 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
649 u8 subsystem, u8 opcode, int cmd_len,
650 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
654 req_hdr->opcode = opcode;
655 req_hdr->subsystem = subsystem;
656 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
657 req_hdr->version = 0;
658 fill_wrb_tags(wrb, (ulong) req_hdr);
659 wrb->payload_length = cmd_len;
661 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
662 MCC_WRB_SGE_CNT_SHIFT;
663 sge = nonembedded_sgl(wrb);
664 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
665 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
666 sge->len = cpu_to_le32(mem->size);
668 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
669 be_dws_cpu_to_le(wrb, 8);
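/* A WRB carries its command payload in one of two ways: small requests are
 * "embedded" directly in wrb->payload (MCC_WRB_EMBEDDED_MASK set, as in the
 * else-branch above), while larger requests live in a separate DMA buffer
 * described by a single scatter-gather entry (the mem != NULL path).  Either
 * way, the request header address is recorded via fill_wrb_tags() so the
 * completion handler can locate it again (see be_decode_resp_hdr()).
 */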
672 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
673 struct be_dma_mem *mem)
675 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
676 u64 dma = (u64)mem->dma;
678 for (i = 0; i < buf_pages; i++) {
679 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
680 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
685 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
687 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
688 struct be_mcc_wrb *wrb
689 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
690 memset(wrb, 0, sizeof(*wrb));
694 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
696 struct be_queue_info *mccq = &adapter->mcc_obj.q;
697 struct be_mcc_wrb *wrb;
702 if (atomic_read(&mccq->used) >= mccq->len)
705 wrb = queue_head_node(mccq);
706 queue_head_inc(mccq);
707 atomic_inc(&mccq->used);
708 memset(wrb, 0, sizeof(*wrb));
712 static bool use_mcc(struct be_adapter *adapter)
714 return adapter->mcc_obj.q.created;
717 /* Must be used only in process context */
718 static int be_cmd_lock(struct be_adapter *adapter)
720 if (use_mcc(adapter)) {
721 spin_lock_bh(&adapter->mcc_lock);
724 return mutex_lock_interruptible(&adapter->mbox_lock);
728 /* Must be used only in process context */
729 static void be_cmd_unlock(struct be_adapter *adapter)
731 if (use_mcc(adapter))
732 spin_unlock_bh(&adapter->mcc_lock);
734 return mutex_unlock(&adapter->mbox_lock);
737 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
738 struct be_mcc_wrb *wrb)
740 struct be_mcc_wrb *dest_wrb;
742 if (use_mcc(adapter)) {
743 dest_wrb = wrb_from_mccq(adapter);
747 dest_wrb = wrb_from_mbox(adapter);
750 memcpy(dest_wrb, wrb, sizeof(*wrb));
751 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
752 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
757 /* Must be used only in process context */
758 static int be_cmd_notify_wait(struct be_adapter *adapter,
759 struct be_mcc_wrb *wrb)
761 struct be_mcc_wrb *dest_wrb;
764 status = be_cmd_lock(adapter);
768 dest_wrb = be_cmd_copy(adapter, wrb);
772 if (use_mcc(adapter))
773 status = be_mcc_notify_wait(adapter);
775 status = be_mbox_notify_wait(adapter);
778 memcpy(wrb, dest_wrb, sizeof(*wrb));
780 be_cmd_unlock(adapter);
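/* Newer commands in this file (e.g. be_cmd_txq_create(), be_cmd_if_create())
 * build their WRB on the stack and hand it to this helper, which copies it
 * into the MCC queue when available (or the mailbox otherwise), fires it, and
 * copies the completed WRB back so the caller can read the embedded response.
 * A minimal sketch of that calling pattern, with a hypothetical request type:
 *
 *	struct be_mcc_wrb wrb = {0};
 *	struct be_cmd_req_xxx *req = embedded_payload(&wrb);
 *	be_wrb_cmd_hdr_prepare(&req->hdr, ..., sizeof(*req), &wrb, NULL);
 *	status = be_cmd_notify_wait(adapter, &wrb);
 */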
784 /* Tell fw we're about to start firing cmds by writing a
785 * special pattern across the wrb hdr; uses mbox
787 int be_cmd_fw_init(struct be_adapter *adapter)
792 if (lancer_chip(adapter))
795 if (mutex_lock_interruptible(&adapter->mbox_lock))
798 wrb = (u8 *)wrb_from_mbox(adapter);
808 status = be_mbox_notify_wait(adapter);
810 mutex_unlock(&adapter->mbox_lock);
814 /* Tell fw we're done with firing cmds by writing a
815 * special pattern across the wrb hdr; uses mbox
817 int be_cmd_fw_clean(struct be_adapter *adapter)
822 if (lancer_chip(adapter))
825 if (mutex_lock_interruptible(&adapter->mbox_lock))
828 wrb = (u8 *)wrb_from_mbox(adapter);
838 status = be_mbox_notify_wait(adapter);
840 mutex_unlock(&adapter->mbox_lock);
844 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
846 struct be_mcc_wrb *wrb;
847 struct be_cmd_req_eq_create *req;
848 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
851 if (mutex_lock_interruptible(&adapter->mbox_lock))
854 wrb = wrb_from_mbox(adapter);
855 req = embedded_payload(wrb);
857 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
858 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
860 /* Support for EQ_CREATEv2 is available only from SH-R onwards */
861 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
864 req->hdr.version = ver;
865 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
867 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
869 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
870 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
871 __ilog2_u32(eqo->q.len / 256));
872 be_dws_cpu_to_le(req->context, sizeof(req->context));
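/* Queue-create commands describe the hardware context as a packed bit-field
 * blob: AMAP_SET_BITS() writes each field at the offsets given by the
 * amap_*_context template structs, and be_dws_cpu_to_le() then converts the
 * whole blob to the little-endian layout the firmware expects.  The same
 * prepare-fields-then-swap pattern recurs in the CQ, MCCQ and write-object
 * commands below.
 */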
874 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
876 status = be_mbox_notify_wait(adapter);
878 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
879 eqo->q.id = le16_to_cpu(resp->eq_id);
881 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
882 eqo->q.created = true;
885 mutex_unlock(&adapter->mbox_lock);
890 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
891 bool permanent, u32 if_handle, u32 pmac_id)
893 struct be_mcc_wrb *wrb;
894 struct be_cmd_req_mac_query *req;
897 spin_lock_bh(&adapter->mcc_lock);
899 wrb = wrb_from_mccq(adapter);
904 req = embedded_payload(wrb);
906 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
907 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
908 req->type = MAC_ADDRESS_TYPE_NETWORK;
912 req->if_id = cpu_to_le16((u16) if_handle);
913 req->pmac_id = cpu_to_le32(pmac_id);
917 status = be_mcc_notify_wait(adapter);
919 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
920 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
924 spin_unlock_bh(&adapter->mcc_lock);
928 /* Uses synchronous MCCQ */
929 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
930 u32 if_id, u32 *pmac_id, u32 domain)
932 struct be_mcc_wrb *wrb;
933 struct be_cmd_req_pmac_add *req;
936 spin_lock_bh(&adapter->mcc_lock);
938 wrb = wrb_from_mccq(adapter);
943 req = embedded_payload(wrb);
945 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
946 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
948 req->hdr.domain = domain;
949 req->if_id = cpu_to_le32(if_id);
950 memcpy(req->mac_address, mac_addr, ETH_ALEN);
952 status = be_mcc_notify_wait(adapter);
954 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
955 *pmac_id = le32_to_cpu(resp->pmac_id);
959 spin_unlock_bh(&adapter->mcc_lock);
961 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
967 /* Uses synchronous MCCQ */
968 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
970 struct be_mcc_wrb *wrb;
971 struct be_cmd_req_pmac_del *req;
977 spin_lock_bh(&adapter->mcc_lock);
979 wrb = wrb_from_mccq(adapter);
984 req = embedded_payload(wrb);
986 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
987 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
989 req->hdr.domain = dom;
990 req->if_id = cpu_to_le32(if_id);
991 req->pmac_id = cpu_to_le32(pmac_id);
993 status = be_mcc_notify_wait(adapter);
996 spin_unlock_bh(&adapter->mcc_lock);
1001 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1002 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1004 struct be_mcc_wrb *wrb;
1005 struct be_cmd_req_cq_create *req;
1006 struct be_dma_mem *q_mem = &cq->dma_mem;
1010 if (mutex_lock_interruptible(&adapter->mbox_lock))
1013 wrb = wrb_from_mbox(adapter);
1014 req = embedded_payload(wrb);
1015 ctxt = &req->context;
1017 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1018 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
1020 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1022 if (BEx_chip(adapter)) {
1023 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1025 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1027 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1028 __ilog2_u32(cq->len/256));
1029 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1030 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1031 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1033 req->hdr.version = 2;
1034 req->page_size = 1; /* 1 for 4K */
1035 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1037 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1038 __ilog2_u32(cq->len/256));
1039 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1040 AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
1042 AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
1046 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1048 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1050 status = be_mbox_notify_wait(adapter);
1052 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1053 cq->id = le16_to_cpu(resp->cq_id);
1057 mutex_unlock(&adapter->mbox_lock);
1062 static u32 be_encoded_q_len(int q_len)
1064 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1065 if (len_encoded == 16)
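/* Ring lengths are powers of two, so fls() yields log2(len) + 1; for example
 * a 256-entry queue encodes as fls(256) = 9.  The elided body maps the
 * special value 16 (a 32768-entry ring) to 0, which appears to be the
 * firmware's encoding for the largest supported ring size.
 */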
1070 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1071 struct be_queue_info *mccq,
1072 struct be_queue_info *cq)
1074 struct be_mcc_wrb *wrb;
1075 struct be_cmd_req_mcc_ext_create *req;
1076 struct be_dma_mem *q_mem = &mccq->dma_mem;
1080 if (mutex_lock_interruptible(&adapter->mbox_lock))
1083 wrb = wrb_from_mbox(adapter);
1084 req = embedded_payload(wrb);
1085 ctxt = &req->context;
1087 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1088 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1090 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1091 if (lancer_chip(adapter)) {
1092 req->hdr.version = 1;
1093 req->cq_id = cpu_to_le16(cq->id);
1095 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1096 be_encoded_q_len(mccq->len));
1097 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1098 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1100 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1104 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1105 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1106 be_encoded_q_len(mccq->len));
1107 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1110 /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
1111 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1112 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
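/* Bit n of the bitmap subscribes the MCC queue to async event code n, so
 * 0x00000022 sets bits 1 and 5 for the link-state and group-5 codes noted
 * above, and the line just above additionally subscribes to the QNQ debug
 * event code.
 */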
1113 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1115 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1117 status = be_mbox_notify_wait(adapter);
1119 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1120 mccq->id = le16_to_cpu(resp->id);
1121 mccq->created = true;
1123 mutex_unlock(&adapter->mbox_lock);
1128 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1129 struct be_queue_info *mccq,
1130 struct be_queue_info *cq)
1132 struct be_mcc_wrb *wrb;
1133 struct be_cmd_req_mcc_create *req;
1134 struct be_dma_mem *q_mem = &mccq->dma_mem;
1138 if (mutex_lock_interruptible(&adapter->mbox_lock))
1141 wrb = wrb_from_mbox(adapter);
1142 req = embedded_payload(wrb);
1143 ctxt = &req->context;
1145 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1146 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1148 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1150 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1151 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1152 be_encoded_q_len(mccq->len));
1153 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1155 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1157 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1159 status = be_mbox_notify_wait(adapter);
1161 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1162 mccq->id = le16_to_cpu(resp->id);
1163 mccq->created = true;
1166 mutex_unlock(&adapter->mbox_lock);
1170 int be_cmd_mccq_create(struct be_adapter *adapter,
1171 struct be_queue_info *mccq,
1172 struct be_queue_info *cq)
1176 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1177 if (status && !lancer_chip(adapter)) {
1178 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1179 "or newer to avoid conflicting priorities between NIC "
1180 "and FCoE traffic");
1181 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1186 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1188 struct be_mcc_wrb wrb = {0};
1189 struct be_cmd_req_eth_tx_create *req;
1190 struct be_queue_info *txq = &txo->q;
1191 struct be_queue_info *cq = &txo->cq;
1192 struct be_dma_mem *q_mem = &txq->dma_mem;
1193 int status, ver = 0;
1195 req = embedded_payload(&wrb);
1196 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1197 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1199 if (lancer_chip(adapter)) {
1200 req->hdr.version = 1;
1201 req->if_id = cpu_to_le16(adapter->if_handle);
1202 } else if (BEx_chip(adapter)) {
1203 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1204 req->hdr.version = 2;
1205 } else { /* For SH */
1206 req->hdr.version = 2;
1209 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1210 req->ulp_num = BE_ULP1_NUM;
1211 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1212 req->cq_id = cpu_to_le16(cq->id);
1213 req->queue_size = be_encoded_q_len(txq->len);
1214 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1215 ver = req->hdr.version;
1217 status = be_cmd_notify_wait(adapter, &wrb);
1219 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1220 txq->id = le16_to_cpu(resp->cid);
1222 txo->db_offset = le32_to_cpu(resp->db_offset);
1224 txo->db_offset = DB_TXULP1_OFFSET;
1225 txq->created = true;
1232 int be_cmd_rxq_create(struct be_adapter *adapter,
1233 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1234 u32 if_id, u32 rss, u8 *rss_id)
1236 struct be_mcc_wrb *wrb;
1237 struct be_cmd_req_eth_rx_create *req;
1238 struct be_dma_mem *q_mem = &rxq->dma_mem;
1241 spin_lock_bh(&adapter->mcc_lock);
1243 wrb = wrb_from_mccq(adapter);
1248 req = embedded_payload(wrb);
1250 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1251 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1253 req->cq_id = cpu_to_le16(cq_id);
1254 req->frag_size = fls(frag_size) - 1;
1256 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1257 req->interface_id = cpu_to_le32(if_id);
1258 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1259 req->rss_queue = cpu_to_le32(rss);
1261 status = be_mcc_notify_wait(adapter);
1263 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1264 rxq->id = le16_to_cpu(resp->id);
1265 rxq->created = true;
1266 *rss_id = resp->rss_id;
1270 spin_unlock_bh(&adapter->mcc_lock);
1274 /* Generic destroyer function for all types of queues; uses mbox */
1277 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1280 struct be_mcc_wrb *wrb;
1281 struct be_cmd_req_q_destroy *req;
1282 u8 subsys = 0, opcode = 0;
1285 if (mutex_lock_interruptible(&adapter->mbox_lock))
1288 wrb = wrb_from_mbox(adapter);
1289 req = embedded_payload(wrb);
1291 switch (queue_type) {
1293 subsys = CMD_SUBSYSTEM_COMMON;
1294 opcode = OPCODE_COMMON_EQ_DESTROY;
1297 subsys = CMD_SUBSYSTEM_COMMON;
1298 opcode = OPCODE_COMMON_CQ_DESTROY;
1301 subsys = CMD_SUBSYSTEM_ETH;
1302 opcode = OPCODE_ETH_TX_DESTROY;
1305 subsys = CMD_SUBSYSTEM_ETH;
1306 opcode = OPCODE_ETH_RX_DESTROY;
1309 subsys = CMD_SUBSYSTEM_COMMON;
1310 opcode = OPCODE_COMMON_MCC_DESTROY;
1316 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1318 req->id = cpu_to_le16(q->id);
1320 status = be_mbox_notify_wait(adapter);
1323 mutex_unlock(&adapter->mbox_lock);
1328 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1330 struct be_mcc_wrb *wrb;
1331 struct be_cmd_req_q_destroy *req;
1334 spin_lock_bh(&adapter->mcc_lock);
1336 wrb = wrb_from_mccq(adapter);
1341 req = embedded_payload(wrb);
1343 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1344 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1345 req->id = cpu_to_le16(q->id);
1347 status = be_mcc_notify_wait(adapter);
1351 spin_unlock_bh(&adapter->mcc_lock);
1355 /* Create an rx filtering policy configuration on an i/f
1356 * Will use MBOX only if MCCQ has not been created.
1358 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1359 u32 *if_handle, u32 domain)
1361 struct be_mcc_wrb wrb = {0};
1362 struct be_cmd_req_if_create *req;
1365 req = embedded_payload(&wrb);
1366 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1367 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
1368 req->hdr.domain = domain;
1369 req->capability_flags = cpu_to_le32(cap_flags);
1370 req->enable_flags = cpu_to_le32(en_flags);
1371 req->pmac_invalid = true;
1373 status = be_cmd_notify_wait(adapter, &wrb);
1375 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1376 *if_handle = le32_to_cpu(resp->interface_id);
1378 /* Hack to retrieve VF's pmac-id on BE3 */
1379 if (BE3_chip(adapter) && !be_physfn(adapter))
1380 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1386 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1388 struct be_mcc_wrb *wrb;
1389 struct be_cmd_req_if_destroy *req;
1392 if (interface_id == -1)
1395 spin_lock_bh(&adapter->mcc_lock);
1397 wrb = wrb_from_mccq(adapter);
1402 req = embedded_payload(wrb);
1404 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1405 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1406 req->hdr.domain = domain;
1407 req->interface_id = cpu_to_le32(interface_id);
1409 status = be_mcc_notify_wait(adapter);
1411 spin_unlock_bh(&adapter->mcc_lock);
1415 /* Get stats is a non-embedded command: the request is not embedded inside
1416 * WRB but is a separate dma memory block
1417 * Uses asynchronous MCC
1419 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1421 struct be_mcc_wrb *wrb;
1422 struct be_cmd_req_hdr *hdr;
1425 spin_lock_bh(&adapter->mcc_lock);
1427 wrb = wrb_from_mccq(adapter);
1432 hdr = nonemb_cmd->va;
1434 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1435 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1437 /* version 1 of the cmd is supported on all chips except BE2 */
1438 if (!BE2_chip(adapter))
1441 be_mcc_notify(adapter);
1442 adapter->stats_cmd_sent = true;
1445 spin_unlock_bh(&adapter->mcc_lock);
1450 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1451 struct be_dma_mem *nonemb_cmd)
1454 struct be_mcc_wrb *wrb;
1455 struct lancer_cmd_req_pport_stats *req;
1458 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1462 spin_lock_bh(&adapter->mcc_lock);
1464 wrb = wrb_from_mccq(adapter);
1469 req = nonemb_cmd->va;
1471 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1472 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1475 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1476 req->cmd_params.params.reset_stats = 0;
1478 be_mcc_notify(adapter);
1479 adapter->stats_cmd_sent = true;
1482 spin_unlock_bh(&adapter->mcc_lock);
1486 static int be_mac_to_link_speed(int mac_speed)
1488 switch (mac_speed) {
1489 case PHY_LINK_SPEED_ZERO:
1491 case PHY_LINK_SPEED_10MBPS:
1493 case PHY_LINK_SPEED_100MBPS:
1495 case PHY_LINK_SPEED_1GBPS:
1497 case PHY_LINK_SPEED_10GBPS:
1499 case PHY_LINK_SPEED_20GBPS:
1501 case PHY_LINK_SPEED_25GBPS:
1503 case PHY_LINK_SPEED_40GBPS:
1509 /* Uses synchronous mcc
1510 * Returns link_speed in Mbps
1512 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1513 u8 *link_status, u32 dom)
1515 struct be_mcc_wrb *wrb;
1516 struct be_cmd_req_link_status *req;
1519 spin_lock_bh(&adapter->mcc_lock);
1522 *link_status = LINK_DOWN;
1524 wrb = wrb_from_mccq(adapter);
1529 req = embedded_payload(wrb);
1531 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1532 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1534 /* version 1 of the cmd is supported on all chips except BE2 */
1535 if (!BE2_chip(adapter))
1536 req->hdr.version = 1;
1538 req->hdr.domain = dom;
1540 status = be_mcc_notify_wait(adapter);
1542 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1544 *link_speed = resp->link_speed ?
1545 le16_to_cpu(resp->link_speed) * 10 :
1546 be_mac_to_link_speed(resp->mac_speed);
1548 if (!resp->logical_link_status)
1552 *link_status = resp->logical_link_status;
1556 spin_unlock_bh(&adapter->mcc_lock);
1560 /* Uses synchronous mcc */
1561 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1563 struct be_mcc_wrb *wrb;
1564 struct be_cmd_req_get_cntl_addnl_attribs *req;
1567 spin_lock_bh(&adapter->mcc_lock);
1569 wrb = wrb_from_mccq(adapter);
1574 req = embedded_payload(wrb);
1576 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1577 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1580 be_mcc_notify(adapter);
1583 spin_unlock_bh(&adapter->mcc_lock);
1587 /* Uses synchronous mcc */
1588 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1590 struct be_mcc_wrb *wrb;
1591 struct be_cmd_req_get_fat *req;
1594 spin_lock_bh(&adapter->mcc_lock);
1596 wrb = wrb_from_mccq(adapter);
1601 req = embedded_payload(wrb);
1603 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1604 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1605 req->fat_operation = cpu_to_le32(QUERY_FAT);
1606 status = be_mcc_notify_wait(adapter);
1608 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1609 if (log_size && resp->log_size)
1610 *log_size = le32_to_cpu(resp->log_size) -
1614 spin_unlock_bh(&adapter->mcc_lock);
1618 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1620 struct be_dma_mem get_fat_cmd;
1621 struct be_mcc_wrb *wrb;
1622 struct be_cmd_req_get_fat *req;
1623 u32 offset = 0, total_size, buf_size,
1624 log_offset = sizeof(u32), payload_len;
1630 total_size = buf_len;
1632 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1633 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1636 if (!get_fat_cmd.va) {
1638 dev_err(&adapter->pdev->dev,
1639 "Memory allocation failure while retrieving FAT data\n");
1643 spin_lock_bh(&adapter->mcc_lock);
1645 while (total_size) {
1646 buf_size = min(total_size, (u32)60*1024);
1647 total_size -= buf_size;
1649 wrb = wrb_from_mccq(adapter);
1654 req = get_fat_cmd.va;
1656 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1657 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1658 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1661 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1662 req->read_log_offset = cpu_to_le32(log_offset);
1663 req->read_log_length = cpu_to_le32(buf_size);
1664 req->data_buffer_size = cpu_to_le32(buf_size);
1666 status = be_mcc_notify_wait(adapter);
1668 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1669 memcpy(buf + offset,
1671 le32_to_cpu(resp->read_log_length));
1673 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1677 log_offset += buf_size;
1680 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1683 spin_unlock_bh(&adapter->mcc_lock);
1686 /* Uses synchronous mcc */
1687 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1690 struct be_mcc_wrb *wrb;
1691 struct be_cmd_req_get_fw_version *req;
1694 spin_lock_bh(&adapter->mcc_lock);
1696 wrb = wrb_from_mccq(adapter);
1702 req = embedded_payload(wrb);
1704 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1705 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1706 status = be_mcc_notify_wait(adapter);
1708 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1709 strcpy(fw_ver, resp->firmware_version_string);
1711 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1714 spin_unlock_bh(&adapter->mcc_lock);
1718 /* Set the EQ delay interval of an EQ to the specified value; uses async mcc */
1721 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1723 struct be_mcc_wrb *wrb;
1724 struct be_cmd_req_modify_eq_delay *req;
1727 spin_lock_bh(&adapter->mcc_lock);
1729 wrb = wrb_from_mccq(adapter);
1734 req = embedded_payload(wrb);
1736 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1737 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1739 req->num_eq = cpu_to_le32(1);
1740 req->delay[0].eq_id = cpu_to_le32(eq_id);
1741 req->delay[0].phase = 0;
1742 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1744 be_mcc_notify(adapter);
1747 spin_unlock_bh(&adapter->mcc_lock);
1751 /* Uses synchronous mcc */
1752 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1753 u32 num, bool untagged, bool promiscuous)
1755 struct be_mcc_wrb *wrb;
1756 struct be_cmd_req_vlan_config *req;
1759 spin_lock_bh(&adapter->mcc_lock);
1761 wrb = wrb_from_mccq(adapter);
1766 req = embedded_payload(wrb);
1768 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1769 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1771 req->interface_id = if_id;
1772 req->promiscuous = promiscuous;
1773 req->untagged = untagged;
1774 req->num_vlan = num;
1776 memcpy(req->normal_vlan, vtag_array,
1777 req->num_vlan * sizeof(vtag_array[0]));
1780 status = be_mcc_notify_wait(adapter);
1783 spin_unlock_bh(&adapter->mcc_lock);
1787 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1789 struct be_mcc_wrb *wrb;
1790 struct be_dma_mem *mem = &adapter->rx_filter;
1791 struct be_cmd_req_rx_filter *req = mem->va;
1794 spin_lock_bh(&adapter->mcc_lock);
1796 wrb = wrb_from_mccq(adapter);
1801 memset(req, 0, sizeof(*req));
1802 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1803 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1806 req->if_id = cpu_to_le32(adapter->if_handle);
1807 if (flags & IFF_PROMISC) {
1808 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1809 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1810 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1812 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1813 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1814 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1815 } else if (flags & IFF_ALLMULTI) {
1816 req->if_flags_mask = req->if_flags =
1817 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1818 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1819 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1823 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1825 struct netdev_hw_addr *ha;
1828 req->if_flags_mask = req->if_flags =
1829 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1831 /* Reset mcast promisc mode if already set by setting mask
1832 * and not setting flags field
1834 req->if_flags_mask |=
1835 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1836 be_if_cap_flags(adapter));
1837 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1838 netdev_for_each_mc_addr(ha, adapter->netdev)
1839 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1842 status = be_mcc_notify_wait(adapter);
1844 spin_unlock_bh(&adapter->mcc_lock);
1848 /* Uses synchronous mcc */
1849 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1851 struct be_mcc_wrb *wrb;
1852 struct be_cmd_req_set_flow_control *req;
1855 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1856 CMD_SUBSYSTEM_COMMON))
1859 spin_lock_bh(&adapter->mcc_lock);
1861 wrb = wrb_from_mccq(adapter);
1866 req = embedded_payload(wrb);
1868 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1869 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1871 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1872 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1874 status = be_mcc_notify_wait(adapter);
1877 spin_unlock_bh(&adapter->mcc_lock);
1882 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1884 struct be_mcc_wrb *wrb;
1885 struct be_cmd_req_get_flow_control *req;
1888 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1889 CMD_SUBSYSTEM_COMMON))
1892 spin_lock_bh(&adapter->mcc_lock);
1894 wrb = wrb_from_mccq(adapter);
1899 req = embedded_payload(wrb);
1901 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1902 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1904 status = be_mcc_notify_wait(adapter);
1906 struct be_cmd_resp_get_flow_control *resp =
1907 embedded_payload(wrb);
1908 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1909 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1913 spin_unlock_bh(&adapter->mcc_lock);
1918 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1919 u32 *mode, u32 *caps, u16 *asic_rev)
1921 struct be_mcc_wrb *wrb;
1922 struct be_cmd_req_query_fw_cfg *req;
1925 if (mutex_lock_interruptible(&adapter->mbox_lock))
1928 wrb = wrb_from_mbox(adapter);
1929 req = embedded_payload(wrb);
1931 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1932 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1934 status = be_mbox_notify_wait(adapter);
1936 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1937 *port_num = le32_to_cpu(resp->phys_port);
1938 *mode = le32_to_cpu(resp->function_mode);
1939 *caps = le32_to_cpu(resp->function_caps);
1940 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
1943 mutex_unlock(&adapter->mbox_lock);
1948 int be_cmd_reset_function(struct be_adapter *adapter)
1950 struct be_mcc_wrb *wrb;
1951 struct be_cmd_req_hdr *req;
1954 if (lancer_chip(adapter)) {
1955 status = lancer_wait_ready(adapter);
1957 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1958 adapter->db + SLIPORT_CONTROL_OFFSET);
1959 status = lancer_test_and_set_rdy_state(adapter);
1962 dev_err(&adapter->pdev->dev,
1963 "Adapter in non recoverable error\n");
1968 if (mutex_lock_interruptible(&adapter->mbox_lock))
1971 wrb = wrb_from_mbox(adapter);
1972 req = embedded_payload(wrb);
1974 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1975 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1977 status = be_mbox_notify_wait(adapter);
1979 mutex_unlock(&adapter->mbox_lock);
1983 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1984 u32 rss_hash_opts, u16 table_size)
1986 struct be_mcc_wrb *wrb;
1987 struct be_cmd_req_rss_config *req;
1988 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1989 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1990 0x3ea83c02, 0x4a110304};
1993 if (mutex_lock_interruptible(&adapter->mbox_lock))
1996 wrb = wrb_from_mbox(adapter);
1997 req = embedded_payload(wrb);
1999 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2000 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2002 req->if_id = cpu_to_le32(adapter->if_handle);
2003 req->enable_rss = cpu_to_le16(rss_hash_opts);
2004 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2006 if (lancer_chip(adapter) || skyhawk_chip(adapter))
2007 req->hdr.version = 1;
2009 memcpy(req->cpu_table, rsstable, table_size);
2010 memcpy(req->hash, myhash, sizeof(myhash));
2011 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2013 status = be_mbox_notify_wait(adapter);
2015 mutex_unlock(&adapter->mbox_lock);
2020 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2021 u8 bcn, u8 sts, u8 state)
2023 struct be_mcc_wrb *wrb;
2024 struct be_cmd_req_enable_disable_beacon *req;
2027 spin_lock_bh(&adapter->mcc_lock);
2029 wrb = wrb_from_mccq(adapter);
2034 req = embedded_payload(wrb);
2036 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2037 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
2039 req->port_num = port_num;
2040 req->beacon_state = state;
2041 req->beacon_duration = bcn;
2042 req->status_duration = sts;
2044 status = be_mcc_notify_wait(adapter);
2047 spin_unlock_bh(&adapter->mcc_lock);
2052 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2054 struct be_mcc_wrb *wrb;
2055 struct be_cmd_req_get_beacon_state *req;
2058 spin_lock_bh(&adapter->mcc_lock);
2060 wrb = wrb_from_mccq(adapter);
2065 req = embedded_payload(wrb);
2067 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2068 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
2070 req->port_num = port_num;
2072 status = be_mcc_notify_wait(adapter);
2074 struct be_cmd_resp_get_beacon_state *resp =
2075 embedded_payload(wrb);
2076 *state = resp->beacon_state;
2080 spin_unlock_bh(&adapter->mcc_lock);
2084 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2085 u32 data_size, u32 data_offset,
2086 const char *obj_name, u32 *data_written,
2087 u8 *change_status, u8 *addn_status)
2089 struct be_mcc_wrb *wrb;
2090 struct lancer_cmd_req_write_object *req;
2091 struct lancer_cmd_resp_write_object *resp;
2095 spin_lock_bh(&adapter->mcc_lock);
2096 adapter->flash_status = 0;
2098 wrb = wrb_from_mccq(adapter);
2104 req = embedded_payload(wrb);
2106 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2107 OPCODE_COMMON_WRITE_OBJECT,
2108 sizeof(struct lancer_cmd_req_write_object), wrb,
2111 ctxt = &req->context;
2112 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2113 write_length, ctxt, data_size);
2116 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2119 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2122 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2123 req->write_offset = cpu_to_le32(data_offset);
2124 strcpy(req->object_name, obj_name);
2125 req->descriptor_count = cpu_to_le32(1);
2126 req->buf_len = cpu_to_le32(data_size);
2127 req->addr_low = cpu_to_le32((cmd->dma +
2128 sizeof(struct lancer_cmd_req_write_object))
2130 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2131 sizeof(struct lancer_cmd_req_write_object)));
2133 be_mcc_notify(adapter);
2134 spin_unlock_bh(&adapter->mcc_lock);
2136 if (!wait_for_completion_timeout(&adapter->flash_compl,
2137 msecs_to_jiffies(60000)))
2140 status = adapter->flash_status;
2142 resp = embedded_payload(wrb);
2144 *data_written = le32_to_cpu(resp->actual_write_len);
2145 *change_status = resp->change_status;
2147 *addn_status = resp->additional_status;
2153 spin_unlock_bh(&adapter->mcc_lock);
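/* Unlike most commands in this file, flash writes are not completed while
 * holding mcc_lock: be_mcc_notify() posts the WRB, the lock is dropped, and
 * the thread sleeps on adapter->flash_compl, which be_mcc_compl_process()
 * completes when the WRITE_OBJECT/WRITE_FLASHROM completion arrives (see the
 * flash_status handling near the top of this file).  The 60s timeout bounds
 * the wait in case firmware never responds.
 */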
2157 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2158 u32 data_size, u32 data_offset, const char *obj_name,
2159 u32 *data_read, u32 *eof, u8 *addn_status)
2161 struct be_mcc_wrb *wrb;
2162 struct lancer_cmd_req_read_object *req;
2163 struct lancer_cmd_resp_read_object *resp;
2166 spin_lock_bh(&adapter->mcc_lock);
2168 wrb = wrb_from_mccq(adapter);
2174 req = embedded_payload(wrb);
2176 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2177 OPCODE_COMMON_READ_OBJECT,
2178 sizeof(struct lancer_cmd_req_read_object), wrb,
2181 req->desired_read_len = cpu_to_le32(data_size);
2182 req->read_offset = cpu_to_le32(data_offset);
2183 strcpy(req->object_name, obj_name);
2184 req->descriptor_count = cpu_to_le32(1);
2185 req->buf_len = cpu_to_le32(data_size);
2186 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2187 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2189 status = be_mcc_notify_wait(adapter);
2191 resp = embedded_payload(wrb);
2193 *data_read = le32_to_cpu(resp->actual_read_len);
2194 *eof = le32_to_cpu(resp->eof);
2196 *addn_status = resp->additional_status;
2200 spin_unlock_bh(&adapter->mcc_lock);
2204 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2205 u32 flash_type, u32 flash_opcode, u32 buf_size)
2207 struct be_mcc_wrb *wrb;
2208 struct be_cmd_write_flashrom *req;
2211 spin_lock_bh(&adapter->mcc_lock);
2212 adapter->flash_status = 0;
2214 wrb = wrb_from_mccq(adapter);
2221 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2222 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2224 req->params.op_type = cpu_to_le32(flash_type);
2225 req->params.op_code = cpu_to_le32(flash_opcode);
2226 req->params.data_buf_size = cpu_to_le32(buf_size);
2228 be_mcc_notify(adapter);
2229 spin_unlock_bh(&adapter->mcc_lock);
2231 if (!wait_for_completion_timeout(&adapter->flash_compl,
2232 msecs_to_jiffies(40000)))
2235 status = adapter->flash_status;
2240 spin_unlock_bh(&adapter->mcc_lock);
2244 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2247 struct be_mcc_wrb *wrb;
2248 struct be_cmd_read_flash_crc *req;
2251 spin_lock_bh(&adapter->mcc_lock);
2253 wrb = wrb_from_mccq(adapter);
2258 req = embedded_payload(wrb);
2260 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2261 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2264 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2265 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2266 req->params.offset = cpu_to_le32(offset);
2267 req->params.data_buf_size = cpu_to_le32(0x4);
2269 status = be_mcc_notify_wait(adapter);
2271 memcpy(flashed_crc, req->crc, 4);
2274 spin_unlock_bh(&adapter->mcc_lock);
2278 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2279 struct be_dma_mem *nonemb_cmd)
2281 struct be_mcc_wrb *wrb;
2282 struct be_cmd_req_acpi_wol_magic_config *req;
2285 spin_lock_bh(&adapter->mcc_lock);
2287 wrb = wrb_from_mccq(adapter);
2292 req = nonemb_cmd->va;
2294 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2295 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2297 memcpy(req->magic_mac, mac, ETH_ALEN);
2299 status = be_mcc_notify_wait(adapter);
2302 spin_unlock_bh(&adapter->mcc_lock);
2306 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2307 u8 loopback_type, u8 enable)
2309 struct be_mcc_wrb *wrb;
2310 struct be_cmd_req_set_lmode *req;
2313 spin_lock_bh(&adapter->mcc_lock);
2315 wrb = wrb_from_mccq(adapter);
2321 req = embedded_payload(wrb);
2323 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2324 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2327 req->src_port = port_num;
2328 req->dest_port = port_num;
2329 req->loopback_type = loopback_type;
2330 req->loopback_state = enable;
2332 status = be_mcc_notify_wait(adapter);
2334 spin_unlock_bh(&adapter->mcc_lock);
2338 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2339 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2341 struct be_mcc_wrb *wrb;
2342 struct be_cmd_req_loopback_test *req;
2345 spin_lock_bh(&adapter->mcc_lock);
2347 wrb = wrb_from_mccq(adapter);
2353 req = embedded_payload(wrb);
2355 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2356 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2357 req->hdr.timeout = cpu_to_le32(4);
2359 req->pattern = cpu_to_le64(pattern);
2360 req->src_port = cpu_to_le32(port_num);
2361 req->dest_port = cpu_to_le32(port_num);
2362 req->pkt_size = cpu_to_le32(pkt_size);
2363 req->num_pkts = cpu_to_le32(num_pkts);
2364 req->loopback_type = cpu_to_le32(loopback_type);
2366 status = be_mcc_notify_wait(adapter);
2368 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2369 status = le32_to_cpu(resp->status);
2373 spin_unlock_bh(&adapter->mcc_lock);
2377 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2378 u32 byte_cnt, struct be_dma_mem *cmd)
2380 struct be_mcc_wrb *wrb;
2381 struct be_cmd_req_ddrdma_test *req;
2385 spin_lock_bh(&adapter->mcc_lock);
2387 wrb = wrb_from_mccq(adapter);
2393 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2394 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2396 req->pattern = cpu_to_le64(pattern);
2397 req->byte_count = cpu_to_le32(byte_cnt);
2398 for (i = 0; i < byte_cnt; i++) {
2399 req->snd_buff[i] = (u8)(pattern >> (j*8));
2405 status = be_mcc_notify_wait(adapter);
2408 struct be_cmd_resp_ddrdma_test *resp;
2410 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2417 spin_unlock_bh(&adapter->mcc_lock);
2421 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2422 struct be_dma_mem *nonemb_cmd)
2424 struct be_mcc_wrb *wrb;
2425 struct be_cmd_req_seeprom_read *req;
2428 spin_lock_bh(&adapter->mcc_lock);
2430 wrb = wrb_from_mccq(adapter);
2435 req = nonemb_cmd->va;
2437 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2438 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2441 status = be_mcc_notify_wait(adapter);
2444 spin_unlock_bh(&adapter->mcc_lock);
2448 int be_cmd_get_phy_info(struct be_adapter *adapter)
2450 struct be_mcc_wrb *wrb;
2451 struct be_cmd_req_get_phy_info *req;
2452 struct be_dma_mem cmd;
2455 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2456 CMD_SUBSYSTEM_COMMON))
2459 spin_lock_bh(&adapter->mcc_lock);
2461 wrb = wrb_from_mccq(adapter);
2466 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2467 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2470 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2477 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2478 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2481 status = be_mcc_notify_wait(adapter);
2483 struct be_phy_info *resp_phy_info =
2484 cmd.va + sizeof(struct be_cmd_req_hdr);
2485 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2486 adapter->phy.interface_type =
2487 le16_to_cpu(resp_phy_info->interface_type);
2488 adapter->phy.auto_speeds_supported =
2489 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2490 adapter->phy.fixed_speeds_supported =
2491 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2492 adapter->phy.misc_params =
2493 le32_to_cpu(resp_phy_info->misc_params);
2495 if (BE2_chip(adapter)) {
2496 adapter->phy.fixed_speeds_supported =
2497 BE_SUPPORTED_SPEED_10GBPS |
2498 BE_SUPPORTED_SPEED_1GBPS;
2501 pci_free_consistent(adapter->pdev, cmd.size,
2504 spin_unlock_bh(&adapter->mcc_lock);
2508 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2510 struct be_mcc_wrb *wrb;
2511 struct be_cmd_req_set_qos *req;
2514 spin_lock_bh(&adapter->mcc_lock);
2516 wrb = wrb_from_mccq(adapter);
2522 req = embedded_payload(wrb);
2524 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2525 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2527 req->hdr.domain = domain;
2528 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2529 req->max_bps_nic = cpu_to_le32(bps);
2531 status = be_mcc_notify_wait(adapter);
2534 spin_unlock_bh(&adapter->mcc_lock);
2538 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2540 struct be_mcc_wrb *wrb;
2541 struct be_cmd_req_cntl_attribs *req;
2542 struct be_cmd_resp_cntl_attribs *resp;
2544 int payload_len = max(sizeof(*req), sizeof(*resp));
2545 struct mgmt_controller_attrib *attribs;
2546 struct be_dma_mem attribs_cmd;
2548 if (mutex_lock_interruptible(&adapter->mbox_lock))
2551 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2552 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2553 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2555 if (!attribs_cmd.va) {
2556 dev_err(&adapter->pdev->dev,
2557 "Memory allocation failure\n");
2562 wrb = wrb_from_mbox(adapter);
2567 req = attribs_cmd.va;
2569 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2570 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2573 status = be_mbox_notify_wait(adapter);
2575 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2576 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2580 mutex_unlock(&adapter->mbox_lock);
2582 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2583 attribs_cmd.va, attribs_cmd.dma);
2588 int be_cmd_req_native_mode(struct be_adapter *adapter)
2590 struct be_mcc_wrb *wrb;
2591 struct be_cmd_req_set_func_cap *req;
2594 if (mutex_lock_interruptible(&adapter->mbox_lock))
2597 wrb = wrb_from_mbox(adapter);
2603 req = embedded_payload(wrb);
2605 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2606 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2608 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2609 CAPABILITY_BE3_NATIVE_ERX_API);
2610 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2612 status = be_mbox_notify_wait(adapter);
2614 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2615 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2616 CAPABILITY_BE3_NATIVE_ERX_API;
2617 if (!adapter->be3_native)
2618 dev_warn(&adapter->pdev->dev,
2619 "adapter not in advanced mode\n");
2622 mutex_unlock(&adapter->mbox_lock);
2626 /* Get privilege(s) for a function */
2627 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2630 struct be_mcc_wrb *wrb;
2631 struct be_cmd_req_get_fn_privileges *req;
2634 spin_lock_bh(&adapter->mcc_lock);
2636 wrb = wrb_from_mccq(adapter);
2642 req = embedded_payload(wrb);
2644 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2645 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2648 req->hdr.domain = domain;
2650 status = be_mcc_notify_wait(adapter);
2652 struct be_cmd_resp_get_fn_privileges *resp =
2653 embedded_payload(wrb);
2654 *privilege = le32_to_cpu(resp->privilege_mask);
2658 spin_unlock_bh(&adapter->mcc_lock);
2662 /* Set privilege(s) for a function */
2663 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2666 struct be_mcc_wrb *wrb;
2667 struct be_cmd_req_set_fn_privileges *req;
2670 spin_lock_bh(&adapter->mcc_lock);
2672 wrb = wrb_from_mccq(adapter);
2678 req = embedded_payload(wrb);
2679 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2680 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2682 req->hdr.domain = domain;
2683 if (lancer_chip(adapter))
2684 req->privileges_lancer = cpu_to_le32(privileges);
2686 req->privileges = cpu_to_le32(privileges);
2688 status = be_mcc_notify_wait(adapter);
2690 spin_unlock_bh(&adapter->mcc_lock);
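/* Illustrative sketch (not part of the driver): querying the privileges of
 * the calling function (domain 0) and caching them so that be_cmd_allowed()
 * can veto management commands the function is not entitled to issue. The
 * BE_PRIV_* mask names come from the privilege map at the top of this file.
 */
static int example_cache_fn_privileges(struct be_adapter *adapter)
{
	u32 privileges = 0;
	int status;

	status = be_cmd_get_fn_privileges(adapter, &privileges, 0);
	if (status)
		return status;

	adapter->cmd_privileges = privileges;

	if (!(privileges & BE_PRIV_DEVCFG))
		dev_info(&adapter->pdev->dev,
			 "Function lacks device-configuration privilege\n");
	return 0;
}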
2694 /* pmac_id_valid: true => pmac_id is supplied and the MAC address for that
2695 * pmac_id is requested; false => an active pmac_id or a MAC address is
2696 * requested. If an active pmac_id is found, *pmac_id_valid is set to true. */
2698 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2699 bool *pmac_id_valid, u32 *pmac_id, u8 domain)
2701 struct be_mcc_wrb *wrb;
2702 struct be_cmd_req_get_mac_list *req;
2705 struct be_dma_mem get_mac_list_cmd;
2708 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2709 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2710 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2711 get_mac_list_cmd.size,
2712 &get_mac_list_cmd.dma);
2714 if (!get_mac_list_cmd.va) {
2715 dev_err(&adapter->pdev->dev,
2716 "Memory allocation failure during GET_MAC_LIST\n");
2720 spin_lock_bh(&adapter->mcc_lock);
2722 wrb = wrb_from_mccq(adapter);
2728 req = get_mac_list_cmd.va;
2730 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2731 OPCODE_COMMON_GET_MAC_LIST,
2732 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2733 req->hdr.domain = domain;
2734 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2735 if (*pmac_id_valid) {
2736 req->mac_id = cpu_to_le32(*pmac_id);
2737 req->iface_id = cpu_to_le16(adapter->if_handle);
2738 req->perm_override = 0;
2740 req->perm_override = 1;
2743 status = be_mcc_notify_wait(adapter);
2745 struct be_cmd_resp_get_mac_list *resp =
2746 get_mac_list_cmd.va;
2748 if (*pmac_id_valid) {
2749 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2754 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2755 /* The MAC list returned can contain one or more active mac_ids
2756 * and/or one or more true or pseudo permanent MAC addresses.
2757 * If an active mac_id is present, return the first active mac_id found. */
2760 for (i = 0; i < mac_count; i++) {
2761 struct get_list_macaddr *mac_entry;
2765 mac_entry = &resp->macaddr_list[i];
2766 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2767 /* A mac_id entry is a 32-bit value, whereas a MAC address is 6 bytes */
2770 if (mac_addr_size == sizeof(u32)) {
2771 *pmac_id_valid = true;
2772 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2773 *pmac_id = le32_to_cpu(mac_id);
2777 /* If no active mac_id is found, return the first MAC address */
2778 *pmac_id_valid = false;
2779 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2784 spin_unlock_bh(&adapter->mcc_lock);
2785 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2786 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2790 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
2794 if (BEx_chip(adapter))
2795 return be_cmd_mac_addr_query(adapter, mac, false,
2796 adapter->if_handle, curr_pmac_id);
2798 /* Fetch the MAC address using pmac_id */
2799 return be_cmd_get_mac_from_list(adapter, mac, &active,
2803 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2806 bool pmac_valid = false;
2808 memset(mac, 0, ETH_ALEN);
2810 if (BEx_chip(adapter)) {
2811 if (be_physfn(adapter))
2812 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2815 status = be_cmd_mac_addr_query(adapter, mac, false,
2816 adapter->if_handle, 0);
2818 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
2825 /* Uses synchronous MCCQ */
2826 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2827 u8 mac_count, u32 domain)
2829 struct be_mcc_wrb *wrb;
2830 struct be_cmd_req_set_mac_list *req;
2832 struct be_dma_mem cmd;
2834 memset(&cmd, 0, sizeof(struct be_dma_mem));
2835 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2836 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2837 &cmd.dma, GFP_KERNEL);
2841 spin_lock_bh(&adapter->mcc_lock);
2843 wrb = wrb_from_mccq(adapter);
2850 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2851 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2854 req->hdr.domain = domain;
2855 req->mac_count = mac_count;
2857 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2859 status = be_mcc_notify_wait(adapter);
2862 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2864 spin_unlock_bh(&adapter->mcc_lock);
2868 /* Wrapper to delete any active MACs and provision the new MAC.
2869 * Changes to MAC_LIST are allowed if and only if none of the MAC addresses
2870 * in the current list are active. */
2872 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2874 bool active_mac = false;
2875 u8 old_mac[ETH_ALEN];
2879 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
2881 if (!status && active_mac)
2882 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2884 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
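/* Illustrative sketch (not part of the driver): replacing the MAC address of
 * an interface through the wrapper above, in the base domain. The new_mac
 * buffer is a hypothetical caller-supplied 6-byte address.
 */
static int example_replace_mac(struct be_adapter *adapter, u8 *new_mac)
{
	/* Deletes the currently active MAC (if any) and provisions new_mac */
	return be_cmd_set_mac(adapter, new_mac, adapter->if_handle, 0);
}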
2887 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2888 u32 domain, u16 intf_id, u16 hsw_mode)
2890 struct be_mcc_wrb *wrb;
2891 struct be_cmd_req_set_hsw_config *req;
2895 spin_lock_bh(&adapter->mcc_lock);
2897 wrb = wrb_from_mccq(adapter);
2903 req = embedded_payload(wrb);
2904 ctxt = &req->context;
2906 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2907 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2909 req->hdr.domain = domain;
2910 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2912 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2913 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2915 if (!BEx_chip(adapter) && hsw_mode) {
2916 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
2917 ctxt, adapter->hba_port_num);
2918 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
2919 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
2923 be_dws_cpu_to_le(req->context, sizeof(req->context));
2924 status = be_mcc_notify_wait(adapter);
2927 spin_unlock_bh(&adapter->mcc_lock);
2931 /* Get the hyper-switch (HSW) configuration */
2932 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2933 u32 domain, u16 intf_id, u8 *mode)
2935 struct be_mcc_wrb *wrb;
2936 struct be_cmd_req_get_hsw_config *req;
2941 spin_lock_bh(&adapter->mcc_lock);
2943 wrb = wrb_from_mccq(adapter);
2949 req = embedded_payload(wrb);
2950 ctxt = &req->context;
2952 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2953 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2955 req->hdr.domain = domain;
2956 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2958 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2960 if (!BEx_chip(adapter)) {
2961 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2962 ctxt, adapter->hba_port_num);
2963 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
2965 be_dws_cpu_to_le(req->context, sizeof(req->context));
2967 status = be_mcc_notify_wait(adapter);
2969 struct be_cmd_resp_get_hsw_config *resp =
2970 embedded_payload(wrb);
2971 be_dws_le_to_cpu(&resp->context,
2972 sizeof(resp->context));
2973 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2974 pvid, &resp->context);
2976 *pvid = le16_to_cpu(vid);
2978 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2979 port_fwd_type, &resp->context);
2983 spin_unlock_bh(&adapter->mcc_lock);
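/* Illustrative sketch (not part of the driver): reading the port VLAN id
 * (PVID) programmed for this function's interface via the query above.
 * Passing a local mode variable is an assumption made for the example;
 * the real callers in be_main.c may pass NULL for fields they ignore.
 */
static int example_read_pvid(struct be_adapter *adapter, u16 *pvid)
{
	u8 mode = 0;

	return be_cmd_get_hsw_config(adapter, pvid, 0,
				     adapter->if_handle, &mode);
}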
2987 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2989 struct be_mcc_wrb *wrb;
2990 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2992 int payload_len = sizeof(*req);
2993 struct be_dma_mem cmd;
2995 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2999 if (mutex_lock_interruptible(&adapter->mbox_lock))
3002 memset(&cmd, 0, sizeof(struct be_dma_mem));
3003 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3004 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3007 dev_err(&adapter->pdev->dev,
3008 "Memory allocation failure\n");
3013 wrb = wrb_from_mbox(adapter);
3021 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3022 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3023 payload_len, wrb, &cmd);
3025 req->hdr.version = 1;
3026 req->query_options = BE_GET_WOL_CAP;
3028 status = be_mbox_notify_wait(adapter);
3030 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3031 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
3033 /* The command can succeed misleadingly on old firmware that is not
3034 * aware of the V1 version; fake an error in that case. */
3035 if (resp->hdr.response_length < payload_len) {
3039 adapter->wol_cap = resp->wol_settings;
3042 mutex_unlock(&adapter->mbox_lock);
3044 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3048 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3049 struct be_dma_mem *cmd)
3051 struct be_mcc_wrb *wrb;
3052 struct be_cmd_req_get_ext_fat_caps *req;
3055 if (mutex_lock_interruptible(&adapter->mbox_lock))
3058 wrb = wrb_from_mbox(adapter);
3065 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3066 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3067 cmd->size, wrb, cmd);
3068 req->parameter_type = cpu_to_le32(1);
3070 status = be_mbox_notify_wait(adapter);
3072 mutex_unlock(&adapter->mbox_lock);
3076 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3077 struct be_dma_mem *cmd,
3078 struct be_fat_conf_params *configs)
3080 struct be_mcc_wrb *wrb;
3081 struct be_cmd_req_set_ext_fat_caps *req;
3084 spin_lock_bh(&adapter->mcc_lock);
3086 wrb = wrb_from_mccq(adapter);
3093 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3094 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3095 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3096 cmd->size, wrb, cmd);
3098 status = be_mcc_notify_wait(adapter);
3100 spin_unlock_bh(&adapter->mcc_lock);
3104 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3106 struct be_mcc_wrb *wrb;
3107 struct be_cmd_req_get_port_name *req;
3110 if (!lancer_chip(adapter)) {
3111 *port_name = adapter->hba_port_num + '0';
3115 spin_lock_bh(&adapter->mcc_lock);
3117 wrb = wrb_from_mccq(adapter);
3123 req = embedded_payload(wrb);
3125 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3126 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3128 req->hdr.version = 1;
3130 status = be_mcc_notify_wait(adapter);
3132 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3133 *port_name = resp->port_name[adapter->hba_port_num];
3135 *port_name = adapter->hba_port_num + '0';
3138 spin_unlock_bh(&adapter->mcc_lock);
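/* Illustrative sketch (not part of the driver): fetching the board-level port
 * name for log messages. On non-Lancer adapters the routine above simply
 * derives the name from hba_port_num without touching the MCCQ.
 */
static void example_log_port_name(struct be_adapter *adapter)
{
	u8 port_name = '?';

	if (!be_cmd_query_port_name(adapter, &port_name))
		dev_info(&adapter->pdev->dev, "port %c\n", port_name);
}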
3142 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
3144 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3147 for (i = 0; i < desc_count; i++) {
3148 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3149 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
3150 return (struct be_nic_res_desc *)hdr;
3152 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3153 hdr = (void *)hdr + hdr->desc_len;
3158 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3161 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3162 struct be_pcie_res_desc *pcie;
3165 for (i = 0; i < desc_count; i++) {
3166 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3167 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3168 pcie = (struct be_pcie_res_desc *)hdr;
3169 if (pcie->pf_num == devfn)
3173 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3174 hdr = (void *)hdr + hdr->desc_len;
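/* Illustrative sketch (not part of the driver): both helpers above walk the
 * same length-prefixed descriptor list; the generic pattern is shown here
 * with a hypothetical match callback. A desc_len of 0 is treated as a
 * V0-sized descriptor, exactly as in the helpers.
 */
static struct be_res_desc_hdr *
example_find_desc(u8 *buf, u32 desc_count,
		  bool (*match)(struct be_res_desc_hdr *hdr))
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (match(hdr))
			return hdr;

		/* Advance by the descriptor's own length */
		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}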
3179 static void be_copy_nic_desc(struct be_resources *res,
3180 struct be_nic_res_desc *desc)
3182 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3183 res->max_vlans = le16_to_cpu(desc->vlan_count);
3184 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3185 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3186 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3187 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3188 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3189 /* Clear flags that the driver is not interested in */
3190 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3191 BE_IF_CAP_FLAGS_WANT;
3192 /* Need 1 RXQ as the default RXQ */
3193 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3194 res->max_rss_qs -= 1;
3198 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3200 struct be_mcc_wrb *wrb;
3201 struct be_cmd_req_get_func_config *req;
3203 struct be_dma_mem cmd;
3205 if (mutex_lock_interruptible(&adapter->mbox_lock))
3208 memset(&cmd, 0, sizeof(struct be_dma_mem));
3209 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3210 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3213 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3218 wrb = wrb_from_mbox(adapter);
3226 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3227 OPCODE_COMMON_GET_FUNC_CONFIG,
3228 cmd.size, wrb, &cmd);
3230 if (skyhawk_chip(adapter))
3231 req->hdr.version = 1;
3233 status = be_mbox_notify_wait(adapter);
3235 struct be_cmd_resp_get_func_config *resp = cmd.va;
3236 u32 desc_count = le32_to_cpu(resp->desc_count);
3237 struct be_nic_res_desc *desc;
3239 desc = be_get_nic_desc(resp->func_param, desc_count);
3245 adapter->pf_number = desc->pf_num;
3246 be_copy_nic_desc(res, desc);
3249 mutex_unlock(&adapter->mbox_lock);
3251 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3256 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3257 u8 domain, struct be_dma_mem *cmd)
3259 struct be_mcc_wrb *wrb;
3260 struct be_cmd_req_get_profile_config *req;
3263 if (mutex_lock_interruptible(&adapter->mbox_lock))
3265 wrb = wrb_from_mbox(adapter);
3268 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3269 OPCODE_COMMON_GET_PROFILE_CONFIG,
3270 cmd->size, wrb, cmd);
3272 req->type = ACTIVE_PROFILE_TYPE;
3273 req->hdr.domain = domain;
3274 if (!lancer_chip(adapter))
3275 req->hdr.version = 1;
3277 status = be_mbox_notify_wait(adapter);
3279 mutex_unlock(&adapter->mbox_lock);
3284 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3285 u8 domain, struct be_dma_mem *cmd)
3287 struct be_mcc_wrb *wrb;
3288 struct be_cmd_req_get_profile_config *req;
3291 spin_lock_bh(&adapter->mcc_lock);
3293 wrb = wrb_from_mccq(adapter);
3300 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3301 OPCODE_COMMON_GET_PROFILE_CONFIG,
3302 cmd->size, wrb, cmd);
3304 req->type = ACTIVE_PROFILE_TYPE;
3305 req->hdr.domain = domain;
3306 if (!lancer_chip(adapter))
3307 req->hdr.version = 1;
3309 status = be_mcc_notify_wait(adapter);
3312 spin_unlock_bh(&adapter->mcc_lock);
3316 /* Uses the synchronous MCCQ if it has already been created; otherwise uses the mailbox */
3317 int be_cmd_get_profile_config(struct be_adapter *adapter,
3318 struct be_resources *res, u8 domain)
3320 struct be_cmd_resp_get_profile_config *resp;
3321 struct be_pcie_res_desc *pcie;
3322 struct be_nic_res_desc *nic;
3323 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3324 struct be_dma_mem cmd;
3328 memset(&cmd, 0, sizeof(struct be_dma_mem));
3329 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3330 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3335 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3337 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3342 desc_count = le32_to_cpu(resp->desc_count);
3344 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3347 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3349 nic = be_get_nic_desc(resp->func_param, desc_count);
3351 be_copy_nic_desc(res, nic);
3355 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
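/* Illustrative sketch (not part of the driver): using the profile query above
 * to discover how many VFs the PF may enable before configuring SR-IOV. The
 * num_vfs output is a hypothetical caller-side variable, not a field defined
 * in this file.
 */
static int example_query_max_vfs(struct be_adapter *adapter, u16 *num_vfs)
{
	struct be_resources res = {0};
	int status;

	status = be_cmd_get_profile_config(adapter, &res, 0);
	if (!status)
		*num_vfs = res.max_vfs;
	return status;
}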
3359 /* Currently only Lancer uses this command, and it supports version 0 only. */
3362 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3365 struct be_mcc_wrb *wrb;
3366 struct be_cmd_req_set_profile_config *req;
3369 spin_lock_bh(&adapter->mcc_lock);
3371 wrb = wrb_from_mccq(adapter);
3377 req = embedded_payload(wrb);
3379 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3380 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3382 req->hdr.domain = domain;
3383 req->desc_count = cpu_to_le32(1);
3384 req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3385 req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3386 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3387 req->nic_desc.pf_num = adapter->pf_number;
3388 req->nic_desc.vf_num = domain;
3390 /* Mark the remaining fields invalid so firmware leaves them untouched */
3391 req->nic_desc.unicast_mac_count = 0xFFFF;
3392 req->nic_desc.mcc_count = 0xFFFF;
3393 req->nic_desc.vlan_count = 0xFFFF;
3394 req->nic_desc.mcast_mac_count = 0xFFFF;
3395 req->nic_desc.txq_count = 0xFFFF;
3396 req->nic_desc.rq_count = 0xFFFF;
3397 req->nic_desc.rssq_count = 0xFFFF;
3398 req->nic_desc.lro_count = 0xFFFF;
3399 req->nic_desc.cq_count = 0xFFFF;
3400 req->nic_desc.toe_conn_count = 0xFFFF;
3401 req->nic_desc.eq_count = 0xFFFF;
3402 req->nic_desc.link_param = 0xFF;
3403 req->nic_desc.bw_min = 0xFFFFFFFF;
3404 req->nic_desc.acpi_params = 0xFF;
3405 req->nic_desc.wol_param = 0x0F;
3408 req->nic_desc.bw_min = cpu_to_le32(bps);
3409 req->nic_desc.bw_max = cpu_to_le32(bps);
3410 status = be_mcc_notify_wait(adapter);
3412 spin_unlock_bh(&adapter->mcc_lock);
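/* Illustrative sketch (not part of the driver): capping a VF's bandwidth via
 * the command above. The vf + 1 domain encoding mirrors the convention used
 * by be_cmd_get_if_id() below; the elided tail of the parameter list is
 * assumed to end with the domain, as the body's use of 'domain' suggests.
 */
static int example_set_vf_rate(struct be_adapter *adapter, int vf, u32 bps)
{
	return be_cmd_set_profile_config(adapter, bps, vf + 1);
}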
3416 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3419 struct be_mcc_wrb *wrb;
3420 struct be_cmd_req_get_iface_list *req;
3421 struct be_cmd_resp_get_iface_list *resp;
3424 spin_lock_bh(&adapter->mcc_lock);
3426 wrb = wrb_from_mccq(adapter);
3431 req = embedded_payload(wrb);
3433 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3434 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3436 req->hdr.domain = vf_num + 1;
3438 status = be_mcc_notify_wait(adapter);
3440 resp = (struct be_cmd_resp_get_iface_list *)req;
3441 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3445 spin_unlock_bh(&adapter->mcc_lock);
3449 static int lancer_wait_idle(struct be_adapter *adapter)
3451 #define SLIPORT_IDLE_TIMEOUT 30
3455 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3456 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3457 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3463 if (i == SLIPORT_IDLE_TIMEOUT)
3469 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3473 status = lancer_wait_idle(adapter);
3477 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3482 /* Check whether a diagnostic dump image is present */
3483 bool dump_present(struct be_adapter *adapter)
3485 u32 sliport_status = 0;
3487 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3488 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3491 int lancer_initiate_dump(struct be_adapter *adapter)
3495 /* Issue a firmware reset and request a diagnostic dump */
3496 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3497 PHYSDEV_CONTROL_DD_MASK);
3499 dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3503 status = lancer_wait_idle(adapter);
3507 if (!dump_present(adapter)) {
3508 dev_err(&adapter->pdev->dev, "Dump image not present\n");
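/* Illustrative sketch (not part of the driver): how an ethtool dump handler
 * might use the helpers above on Lancer. Retrieving the dump contents
 * themselves (lancer_cmd_read_object() in the real driver) is omitted here.
 */
static int example_trigger_fw_dump(struct be_adapter *adapter)
{
	if (!lancer_chip(adapter))
		return -EOPNOTSUPP;

	if (dump_present(adapter))
		dev_info(&adapter->pdev->dev,
			 "Overwriting a previously collected dump image\n");

	/* Resets the firmware and waits for the dump image to be written */
	return lancer_initiate_dump(adapter);
}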
3516 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3518 struct be_mcc_wrb *wrb;
3519 struct be_cmd_enable_disable_vf *req;
3522 if (!lancer_chip(adapter))
3525 spin_lock_bh(&adapter->mcc_lock);
3527 wrb = wrb_from_mccq(adapter);
3533 req = embedded_payload(wrb);
3535 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3536 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3539 req->hdr.domain = domain;
3541 status = be_mcc_notify_wait(adapter);
3543 spin_unlock_bh(&adapter->mcc_lock);
3547 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3549 struct be_mcc_wrb *wrb;
3550 struct be_cmd_req_intr_set *req;
3553 if (mutex_lock_interruptible(&adapter->mbox_lock))
3556 wrb = wrb_from_mbox(adapter);
3558 req = embedded_payload(wrb);
3560 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3561 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3564 req->intr_enabled = intr_enable;
3566 status = be_mbox_notify_wait(adapter);
3568 mutex_unlock(&adapter->mbox_lock);
3572 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3573 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3575 struct be_adapter *adapter = netdev_priv(netdev_handle);
3576 struct be_mcc_wrb *wrb;
3577 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3578 struct be_cmd_req_hdr *req;
3579 struct be_cmd_resp_hdr *resp;
3582 spin_lock_bh(&adapter->mcc_lock);
3584 wrb = wrb_from_mccq(adapter);
3589 req = embedded_payload(wrb);
3590 resp = embedded_payload(wrb);
3592 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3593 hdr->opcode, wrb_payload_size, wrb, NULL);
3594 memcpy(req, wrb_payload, wrb_payload_size);
3595 be_dws_cpu_to_le(req, wrb_payload_size);
3597 status = be_mcc_notify_wait(adapter);
3599 *cmd_status = (status & 0xffff);
3602 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3603 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3605 spin_unlock_bh(&adapter->mcc_lock);
3608 EXPORT_SYMBOL(be_roce_mcc_cmd);
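/* Illustrative sketch (not part of the driver): how a RoCE driver might
 * tunnel a raw MCC command through the export above. The payload buffer,
 * its opcode and its size are hypothetical; only the calling convention
 * (header-first payload, status split into base and extended parts) is
 * taken from the function itself.
 */
static int example_roce_passthrough(struct net_device *netdev, void *payload,
				    int payload_size)
{
	u16 cmd_status = 0, ext_status = 0;
	int status;

	/* payload must start with a struct be_cmd_req_hdr whose subsystem
	 * and opcode fields have already been filled in by the caller.
	 */
	status = be_roce_mcc_cmd(netdev, payload, payload_size,
				 &cmd_status, &ext_status);
	if (status)
		return status;

	return cmd_status ? -EIO : 0;
}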