2 * Copyright (C) 2005 - 2013 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static struct be_cmd_priv_map cmd_priv_map[] = {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
30 OPCODE_COMMON_GET_FLOW_CONTROL,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
36 OPCODE_COMMON_SET_FLOW_CONTROL,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
42 OPCODE_ETH_GET_PPORT_STATS,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
48 OPCODE_COMMON_GET_PHY_DETAILS,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
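/* Check whether a command (opcode/subsystem pair) is permitted at the
 * function's current privilege level (adapter->cmd_privileges). Only the
 * commands listed in cmd_priv_map are restricted.
 */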
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
59 	int num_entries = ARRAY_SIZE(cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
73 return wrb->payload.embedded_payload;
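/* Ring the MCC queue doorbell to tell firmware that one new WRB has been
 * posted; skipped when the adapter is in an error state.
 */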
76 static void be_mcc_notify(struct be_adapter *adapter)
78 struct be_queue_info *mccq = &adapter->mcc_obj.q;
81 if (be_error(adapter))
84 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
88 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
91 /* To check if valid bit is set, check the entire word as we don't know
92  * the endianness of the data (old entry is host endian while a new entry is little endian). */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
98 if (compl->flags != 0) {
99 flags = le32_to_cpu(compl->flags);
100 if (flags & CQE_FLAGS_VALID_MASK) {
101 compl->flags = flags;
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
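/* Rebuild the response-header address from the WRB tags written by
 * fill_wrb_tags(): tag1 carries the upper 32 bits, tag0 the lower 32 bits.
 */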
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
119 addr = ((addr << 16) << 16) | tag0;
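/* Process a single MCC completion: decode the completion status, handle
 * flash/stats/temperature responses and log unsupported or failed commands.
 */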
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 struct be_mcc_compl *compl)
126 u16 compl_status, extd_status;
127 struct be_cmd_resp_hdr *resp_hdr;
128 u8 opcode = 0, subsystem = 0;
130 	/* Just swap the status to host endian; the mcc tag is opaquely copied from the mcc_wrb */
132 be_dws_le_to_cpu(compl, 4);
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 CQE_STATUS_COMPL_MASK;
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
140 opcode = resp_hdr->opcode;
141 subsystem = resp_hdr->subsystem;
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl);
151 if (compl_status == MCC_STATUS_SUCCESS) {
152 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
153 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
154 (subsystem == CMD_SUBSYSTEM_ETH)) {
155 be_parse_stats(adapter);
156 adapter->stats_cmd_sent = false;
158 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
159 subsystem == CMD_SUBSYSTEM_COMMON) {
160 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
162 adapter->drv_stats.be_on_die_temperature =
163 resp->on_die_temperature;
166 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
167 adapter->be_get_temp_freq = 0;
169 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
170 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
173 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
174 dev_warn(&adapter->pdev->dev,
175 "VF is not privileged to issue opcode %d-%d\n",
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK;
180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status);
189 /* Link state evt is a string of bytes; no need for endian swapping */
190 static void be_async_link_state_process(struct be_adapter *adapter,
191 struct be_async_event_link_state *evt)
193 /* When link status changes, link speed must be re-queried from FW */
194 adapter->phy.link_speed = -1;
196 /* Ignore physical link event */
197 if (lancer_chip(adapter) &&
198 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
201 /* For the initial link status do not rely on the ASYNC event as
202 * it may not be received in some cases.
204 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
205 be_link_status_update(adapter, evt->port_link_status);
208 /* Grp5 CoS Priority evt */
209 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
210 struct be_async_event_grp5_cos_priority *evt)
213 adapter->vlan_prio_bmap = evt->available_priority_bmap;
214 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
215 adapter->recommended_prio =
216 evt->reco_default_priority << VLAN_PRIO_SHIFT;
220 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
221 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
222 struct be_async_event_grp5_qos_link_speed *evt)
224 if (adapter->phy.link_speed >= 0 &&
225 evt->physical_port == adapter->port_num)
226 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
230 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
231 struct be_async_event_grp5_pvid_state *evt)
234 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
239 static void be_async_grp5_evt_process(struct be_adapter *adapter,
240 u32 trailer, struct be_mcc_compl *evt)
244 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
245 ASYNC_TRAILER_EVENT_TYPE_MASK;
247 switch (event_type) {
248 case ASYNC_EVENT_COS_PRIORITY:
249 be_async_grp5_cos_priority_process(adapter,
250 (struct be_async_event_grp5_cos_priority *)evt);
252 case ASYNC_EVENT_QOS_SPEED:
253 be_async_grp5_qos_speed_process(adapter,
254 (struct be_async_event_grp5_qos_link_speed *)evt);
256 case ASYNC_EVENT_PVID_STATE:
257 be_async_grp5_pvid_state_process(adapter,
258 (struct be_async_event_grp5_pvid_state *)evt);
261 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
267 static void be_async_dbg_evt_process(struct be_adapter *adapter,
268 u32 trailer, struct be_mcc_compl *cmp)
271 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
273 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
274 ASYNC_TRAILER_EVENT_TYPE_MASK;
276 switch (event_type) {
277 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
279 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
280 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
283 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
289 static inline bool is_link_state_evt(u32 trailer)
291 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
292 ASYNC_TRAILER_EVENT_CODE_MASK) ==
293 ASYNC_EVENT_CODE_LINK_STATE;
296 static inline bool is_grp5_evt(u32 trailer)
298 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
299 ASYNC_TRAILER_EVENT_CODE_MASK) ==
300 ASYNC_EVENT_CODE_GRP_5);
303 static inline bool is_dbg_evt(u32 trailer)
305 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
306 ASYNC_TRAILER_EVENT_CODE_MASK) ==
307 ASYNC_EVENT_CODE_QNQ);
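/* Fetch the completion at the tail of the MCC CQ if its valid bit is set,
 * advancing the tail index; otherwise there is nothing to process.
 */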
310 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
312 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
313 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
315 if (be_mcc_compl_is_new(compl)) {
316 queue_tail_inc(mcc_cq);
322 void be_async_mcc_enable(struct be_adapter *adapter)
324 spin_lock_bh(&adapter->mcc_cq_lock);
326 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
327 adapter->mcc_obj.rearm_cq = true;
329 spin_unlock_bh(&adapter->mcc_cq_lock);
332 void be_async_mcc_disable(struct be_adapter *adapter)
334 spin_lock_bh(&adapter->mcc_cq_lock);
336 adapter->mcc_obj.rearm_cq = false;
337 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
339 spin_unlock_bh(&adapter->mcc_cq_lock);
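/* Drain the MCC completion queue: dispatch async entries to the
 * link-state/grp5/debug handlers, process command completions, and re-arm
 * the CQ with the number of entries consumed.
 */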
342 int be_process_mcc(struct be_adapter *adapter)
344 struct be_mcc_compl *compl;
345 int num = 0, status = 0;
346 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
348 spin_lock(&adapter->mcc_cq_lock);
349 while ((compl = be_mcc_compl_get(adapter))) {
350 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
351 /* Interpret flags as an async trailer */
352 if (is_link_state_evt(compl->flags))
353 be_async_link_state_process(adapter,
354 (struct be_async_event_link_state *) compl);
355 else if (is_grp5_evt(compl->flags))
356 be_async_grp5_evt_process(adapter,
357 compl->flags, compl);
358 else if (is_dbg_evt(compl->flags))
359 be_async_dbg_evt_process(adapter,
360 compl->flags, compl);
361 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
362 status = be_mcc_compl_process(adapter, compl);
363 atomic_dec(&mcc_obj->q.used);
365 be_mcc_compl_use(compl);
370 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
372 spin_unlock(&adapter->mcc_cq_lock);
376 /* Wait till no more pending mcc requests are present */
377 static int be_mcc_wait_compl(struct be_adapter *adapter)
379 #define mcc_timeout 120000 /* 12s timeout */
381 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
383 for (i = 0; i < mcc_timeout; i++) {
384 if (be_error(adapter))
388 status = be_process_mcc(adapter);
391 if (atomic_read(&mcc_obj->q.used) == 0)
395 if (i == mcc_timeout) {
396 dev_err(&adapter->pdev->dev, "FW not responding\n");
397 adapter->fw_timeout = true;
403 /* Notify MCC requests and wait for completion */
404 static int be_mcc_notify_wait(struct be_adapter *adapter)
407 struct be_mcc_wrb *wrb;
408 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
409 u16 index = mcc_obj->q.head;
410 struct be_cmd_resp_hdr *resp;
412 index_dec(&index, mcc_obj->q.len);
413 wrb = queue_index_node(&mcc_obj->q, index);
415 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
417 be_mcc_notify(adapter);
419 status = be_mcc_wait_compl(adapter);
423 status = resp->status;
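/* Poll the mailbox doorbell until its ready bit is set; on timeout, mark
 * the firmware as unresponsive and run error detection.
 */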
428 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
434 if (be_error(adapter))
437 ready = ioread32(db);
438 if (ready == 0xffffffff)
441 ready &= MPU_MAILBOX_DB_RDY_MASK;
446 dev_err(&adapter->pdev->dev, "FW not responding\n");
447 adapter->fw_timeout = true;
448 be_detect_error(adapter);
460 * Insert the mailbox address into the doorbell in two steps
461 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
463 static int be_mbox_notify_wait(struct be_adapter *adapter)
467 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
468 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
469 struct be_mcc_mailbox *mbox = mbox_mem->va;
470 struct be_mcc_compl *compl = &mbox->compl;
472 /* wait for ready to be set */
473 status = be_mbox_db_ready_wait(adapter, db);
477 val |= MPU_MAILBOX_DB_HI_MASK;
478 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
479 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
482 /* wait for ready to be set */
483 status = be_mbox_db_ready_wait(adapter, db);
488 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
489 val |= (u32)(mbox_mem->dma >> 4) << 2;
492 status = be_mbox_db_ready_wait(adapter, db);
496 /* A cq entry has been made now */
497 if (be_mcc_compl_is_new(compl)) {
498 status = be_mcc_compl_process(adapter, &mbox->compl);
499 be_mcc_compl_use(compl);
503 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
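/* Read the current POST stage from the SLIPORT semaphore: via the CSR BAR
 * on BE2/BE3 chips, via PCI config space otherwise.
 */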
509 static u16 be_POST_stage_get(struct be_adapter *adapter)
513 if (BEx_chip(adapter))
514 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
516 pci_read_config_dword(adapter->pdev,
517 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
519 return sem & POST_STAGE_MASK;
522 int lancer_wait_ready(struct be_adapter *adapter)
524 #define SLIPORT_READY_TIMEOUT 30
528 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
529 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
530 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
536 if (i == SLIPORT_READY_TIMEOUT)
542 static bool lancer_provisioning_error(struct be_adapter *adapter)
544 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
545 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
546 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
547 sliport_err1 = ioread32(adapter->db +
548 SLIPORT_ERROR1_OFFSET);
549 sliport_err2 = ioread32(adapter->db +
550 SLIPORT_ERROR2_OFFSET);
552 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
553 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
559 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
562 u32 sliport_status, err, reset_needed;
565 resource_error = lancer_provisioning_error(adapter);
569 status = lancer_wait_ready(adapter);
571 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
572 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
573 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
574 if (err && reset_needed) {
575 iowrite32(SLI_PORT_CONTROL_IP_MASK,
576 adapter->db + SLIPORT_CONTROL_OFFSET);
578 		/* check if the adapter has corrected the error */
579 status = lancer_wait_ready(adapter);
580 sliport_status = ioread32(adapter->db +
581 SLIPORT_STATUS_OFFSET);
582 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
583 SLIPORT_STATUS_RN_MASK);
584 if (status || sliport_status)
586 } else if (err || reset_needed) {
590 	/* Stop error recovery if the error is not recoverable.
591 	 * A no-resource error is temporary and will go away
592 	 * once the PF provisions resources.
594 resource_error = lancer_provisioning_error(adapter);
601 int be_fw_wait_ready(struct be_adapter *adapter)
604 int status, timeout = 0;
605 struct device *dev = &adapter->pdev->dev;
607 if (lancer_chip(adapter)) {
608 status = lancer_wait_ready(adapter);
613 stage = be_POST_stage_get(adapter);
614 if (stage == POST_STAGE_ARMFW_RDY)
617 dev_info(dev, "Waiting for POST, %ds elapsed\n",
619 if (msleep_interruptible(2000)) {
620 dev_err(dev, "Waiting for POST aborted\n");
624 } while (timeout < 60);
626 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
631 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
633 return &wrb->payload.sgl[0];
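/* Stash the 64-bit request address in the WRB's tag0/tag1 fields so that
 * be_decode_resp_hdr() can recover the response header at completion time.
 */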
636 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
639 wrb->tag0 = addr & 0xFFFFFFFF;
640 wrb->tag1 = upper_32_bits(addr);
643 /* Don't touch the hdr after it's prepared */
644 /* mem will be NULL for embedded commands */
645 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
646 u8 subsystem, u8 opcode, int cmd_len,
647 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
651 req_hdr->opcode = opcode;
652 req_hdr->subsystem = subsystem;
653 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
654 req_hdr->version = 0;
655 fill_wrb_tags(wrb, (ulong) req_hdr);
656 wrb->payload_length = cmd_len;
658 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
659 MCC_WRB_SGE_CNT_SHIFT;
660 sge = nonembedded_sgl(wrb);
661 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
662 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
663 sge->len = cpu_to_le32(mem->size);
665 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
666 be_dws_cpu_to_le(wrb, 8);
669 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
670 struct be_dma_mem *mem)
672 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
673 u64 dma = (u64)mem->dma;
675 for (i = 0; i < buf_pages; i++) {
676 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
677 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
682 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
684 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
685 struct be_mcc_wrb *wrb
686 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
687 memset(wrb, 0, sizeof(*wrb));
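/* Reserve the next free WRB on the MCC queue (caller holds mcc_lock) and
 * return it zeroed; fails when the queue is already full.
 */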
691 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
693 struct be_queue_info *mccq = &adapter->mcc_obj.q;
694 struct be_mcc_wrb *wrb;
699 if (atomic_read(&mccq->used) >= mccq->len)
702 wrb = queue_head_node(mccq);
703 queue_head_inc(mccq);
704 atomic_inc(&mccq->used);
705 memset(wrb, 0, sizeof(*wrb));
709 static bool use_mcc(struct be_adapter *adapter)
711 return adapter->mcc_obj.q.created;
714 /* Must be used only in process context */
715 static int be_cmd_lock(struct be_adapter *adapter)
717 if (use_mcc(adapter)) {
718 spin_lock_bh(&adapter->mcc_lock);
721 return mutex_lock_interruptible(&adapter->mbox_lock);
725 /* Must be used only in process context */
726 static void be_cmd_unlock(struct be_adapter *adapter)
728 if (use_mcc(adapter))
729 spin_unlock_bh(&adapter->mcc_lock);
731 return mutex_unlock(&adapter->mbox_lock);
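/* Copy a locally built WRB into the next MCC-queue WRB, or into the mailbox
 * WRB when the MCC queue is not yet created, re-pointing the tags at the
 * copied embedded payload.
 */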
734 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
735 struct be_mcc_wrb *wrb)
737 struct be_mcc_wrb *dest_wrb;
739 if (use_mcc(adapter)) {
740 dest_wrb = wrb_from_mccq(adapter);
744 dest_wrb = wrb_from_mbox(adapter);
747 memcpy(dest_wrb, wrb, sizeof(*wrb));
748 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
749 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
754 /* Must be used only in process context */
755 static int be_cmd_notify_wait(struct be_adapter *adapter,
756 struct be_mcc_wrb *wrb)
758 struct be_mcc_wrb *dest_wrb;
761 status = be_cmd_lock(adapter);
765 dest_wrb = be_cmd_copy(adapter, wrb);
769 if (use_mcc(adapter))
770 status = be_mcc_notify_wait(adapter);
772 status = be_mbox_notify_wait(adapter);
775 memcpy(wrb, dest_wrb, sizeof(*wrb));
777 be_cmd_unlock(adapter);
781 /* Tell fw we're about to start firing cmds by writing a
782 * special pattern across the wrb hdr; uses mbox
784 int be_cmd_fw_init(struct be_adapter *adapter)
789 if (lancer_chip(adapter))
792 if (mutex_lock_interruptible(&adapter->mbox_lock))
795 wrb = (u8 *)wrb_from_mbox(adapter);
805 status = be_mbox_notify_wait(adapter);
807 mutex_unlock(&adapter->mbox_lock);
811 /* Tell fw we're done with firing cmds by writing a
812 * special pattern across the wrb hdr; uses mbox
814 int be_cmd_fw_clean(struct be_adapter *adapter)
819 if (lancer_chip(adapter))
822 if (mutex_lock_interruptible(&adapter->mbox_lock))
825 wrb = (u8 *)wrb_from_mbox(adapter);
835 status = be_mbox_notify_wait(adapter);
837 mutex_unlock(&adapter->mbox_lock);
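/* Create an event queue; uses the mailbox. EQ_CREATE v2 is used from
 * Skyhawk-R onwards.
 */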
841 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
843 struct be_mcc_wrb *wrb;
844 struct be_cmd_req_eq_create *req;
845 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
848 if (mutex_lock_interruptible(&adapter->mbox_lock))
851 wrb = wrb_from_mbox(adapter);
852 req = embedded_payload(wrb);
854 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
855 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
857 	/* Support for EQ_CREATE v2 is available only from SH-R onwards */
858 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
861 req->hdr.version = ver;
862 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
864 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
866 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
867 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
868 __ilog2_u32(eqo->q.len / 256));
869 be_dws_cpu_to_le(req->context, sizeof(req->context));
871 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
873 status = be_mbox_notify_wait(adapter);
875 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
876 eqo->q.id = le16_to_cpu(resp->eq_id);
878 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
879 eqo->q.created = true;
882 mutex_unlock(&adapter->mbox_lock);
887 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
888 bool permanent, u32 if_handle, u32 pmac_id)
890 struct be_mcc_wrb *wrb;
891 struct be_cmd_req_mac_query *req;
894 spin_lock_bh(&adapter->mcc_lock);
896 wrb = wrb_from_mccq(adapter);
901 req = embedded_payload(wrb);
903 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
904 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
905 req->type = MAC_ADDRESS_TYPE_NETWORK;
909 req->if_id = cpu_to_le16((u16) if_handle);
910 req->pmac_id = cpu_to_le32(pmac_id);
914 status = be_mcc_notify_wait(adapter);
916 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
917 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
921 spin_unlock_bh(&adapter->mcc_lock);
925 /* Uses synchronous MCCQ */
926 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
927 u32 if_id, u32 *pmac_id, u32 domain)
929 struct be_mcc_wrb *wrb;
930 struct be_cmd_req_pmac_add *req;
933 spin_lock_bh(&adapter->mcc_lock);
935 wrb = wrb_from_mccq(adapter);
940 req = embedded_payload(wrb);
942 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
943 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
945 req->hdr.domain = domain;
946 req->if_id = cpu_to_le32(if_id);
947 memcpy(req->mac_address, mac_addr, ETH_ALEN);
949 status = be_mcc_notify_wait(adapter);
951 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
952 *pmac_id = le32_to_cpu(resp->pmac_id);
956 spin_unlock_bh(&adapter->mcc_lock);
958 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
964 /* Uses synchronous MCCQ */
965 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
967 struct be_mcc_wrb *wrb;
968 struct be_cmd_req_pmac_del *req;
974 spin_lock_bh(&adapter->mcc_lock);
976 wrb = wrb_from_mccq(adapter);
981 req = embedded_payload(wrb);
983 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
984 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
986 req->hdr.domain = dom;
987 req->if_id = cpu_to_le32(if_id);
988 req->pmac_id = cpu_to_le32(pmac_id);
990 status = be_mcc_notify_wait(adapter);
993 spin_unlock_bh(&adapter->mcc_lock);
998 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
999 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1001 struct be_mcc_wrb *wrb;
1002 struct be_cmd_req_cq_create *req;
1003 struct be_dma_mem *q_mem = &cq->dma_mem;
1007 if (mutex_lock_interruptible(&adapter->mbox_lock))
1010 wrb = wrb_from_mbox(adapter);
1011 req = embedded_payload(wrb);
1012 ctxt = &req->context;
1014 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1015 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
1017 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1019 if (BEx_chip(adapter)) {
1020 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1022 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1024 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1025 __ilog2_u32(cq->len/256));
1026 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1027 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1028 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1030 req->hdr.version = 2;
1031 req->page_size = 1; /* 1 for 4K */
1032 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1034 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1035 __ilog2_u32(cq->len/256));
1036 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1037 AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
1039 AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
1043 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1045 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1047 status = be_mbox_notify_wait(adapter);
1049 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1050 cq->id = le16_to_cpu(resp->cq_id);
1054 mutex_unlock(&adapter->mbox_lock);
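/* Encode a queue length into the firmware ring_size representation */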
1059 static u32 be_encoded_q_len(int q_len)
1061 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1062 if (len_encoded == 16)
1067 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1068 struct be_queue_info *mccq,
1069 struct be_queue_info *cq)
1071 struct be_mcc_wrb *wrb;
1072 struct be_cmd_req_mcc_ext_create *req;
1073 struct be_dma_mem *q_mem = &mccq->dma_mem;
1077 if (mutex_lock_interruptible(&adapter->mbox_lock))
1080 wrb = wrb_from_mbox(adapter);
1081 req = embedded_payload(wrb);
1082 ctxt = &req->context;
1084 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1085 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1087 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1088 if (lancer_chip(adapter)) {
1089 req->hdr.version = 1;
1090 req->cq_id = cpu_to_le16(cq->id);
1092 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1093 be_encoded_q_len(mccq->len));
1094 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1095 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1097 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1101 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1102 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1103 be_encoded_q_len(mccq->len));
1104 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1107 	/* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
1108 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1109 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
1110 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1112 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1114 status = be_mbox_notify_wait(adapter);
1116 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1117 mccq->id = le16_to_cpu(resp->id);
1118 mccq->created = true;
1120 mutex_unlock(&adapter->mbox_lock);
1125 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1126 struct be_queue_info *mccq,
1127 struct be_queue_info *cq)
1129 struct be_mcc_wrb *wrb;
1130 struct be_cmd_req_mcc_create *req;
1131 struct be_dma_mem *q_mem = &mccq->dma_mem;
1135 if (mutex_lock_interruptible(&adapter->mbox_lock))
1138 wrb = wrb_from_mbox(adapter);
1139 req = embedded_payload(wrb);
1140 ctxt = &req->context;
1142 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1143 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1145 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1147 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1148 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1149 be_encoded_q_len(mccq->len));
1150 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1152 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1154 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1156 status = be_mbox_notify_wait(adapter);
1158 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1159 mccq->id = le16_to_cpu(resp->id);
1160 mccq->created = true;
1163 mutex_unlock(&adapter->mbox_lock);
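/* Create the MCC queue: try the extended (async-event capable) variant first
 * and fall back to the original MCC_CREATE command if that fails on
 * non-Lancer chips.
 */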
1167 int be_cmd_mccq_create(struct be_adapter *adapter,
1168 struct be_queue_info *mccq,
1169 struct be_queue_info *cq)
1173 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1174 if (status && !lancer_chip(adapter)) {
1175 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1176 "or newer to avoid conflicting priorities between NIC "
1177 "and FCoE traffic");
1178 status = be_cmd_mccq_org_create(adapter, mccq, cq);
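/* Create a TX queue. The WRB is built on the stack and issued via
 * be_cmd_notify_wait(), which uses the MCC queue when available and the
 * mailbox otherwise.
 */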
1183 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1185 struct be_mcc_wrb wrb = {0};
1186 struct be_cmd_req_eth_tx_create *req;
1187 struct be_queue_info *txq = &txo->q;
1188 struct be_queue_info *cq = &txo->cq;
1189 struct be_dma_mem *q_mem = &txq->dma_mem;
1190 int status, ver = 0;
1192 req = embedded_payload(&wrb);
1193 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1194 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1196 if (lancer_chip(adapter)) {
1197 req->hdr.version = 1;
1198 req->if_id = cpu_to_le16(adapter->if_handle);
1199 } else if (BEx_chip(adapter)) {
1200 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1201 req->hdr.version = 2;
1202 } else { /* For SH */
1203 req->hdr.version = 2;
1206 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1207 req->ulp_num = BE_ULP1_NUM;
1208 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1209 req->cq_id = cpu_to_le16(cq->id);
1210 req->queue_size = be_encoded_q_len(txq->len);
1211 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1212 ver = req->hdr.version;
1214 status = be_cmd_notify_wait(adapter, &wrb);
1216 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1217 txq->id = le16_to_cpu(resp->cid);
1219 txo->db_offset = le32_to_cpu(resp->db_offset);
1221 txo->db_offset = DB_TXULP1_OFFSET;
1222 txq->created = true;
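/* Create an RX queue on the given interface; uses the MCC queue. */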
1229 int be_cmd_rxq_create(struct be_adapter *adapter,
1230 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1231 u32 if_id, u32 rss, u8 *rss_id)
1233 struct be_mcc_wrb *wrb;
1234 struct be_cmd_req_eth_rx_create *req;
1235 struct be_dma_mem *q_mem = &rxq->dma_mem;
1238 spin_lock_bh(&adapter->mcc_lock);
1240 wrb = wrb_from_mccq(adapter);
1245 req = embedded_payload(wrb);
1247 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1248 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1250 req->cq_id = cpu_to_le16(cq_id);
1251 req->frag_size = fls(frag_size) - 1;
1253 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1254 req->interface_id = cpu_to_le32(if_id);
1255 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1256 req->rss_queue = cpu_to_le32(rss);
1258 status = be_mcc_notify_wait(adapter);
1260 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1261 rxq->id = le16_to_cpu(resp->id);
1262 rxq->created = true;
1263 *rss_id = resp->rss_id;
1267 spin_unlock_bh(&adapter->mcc_lock);
1271 /* Generic destroyer function for all types of queues
1274 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1277 struct be_mcc_wrb *wrb;
1278 struct be_cmd_req_q_destroy *req;
1279 u8 subsys = 0, opcode = 0;
1282 if (mutex_lock_interruptible(&adapter->mbox_lock))
1285 wrb = wrb_from_mbox(adapter);
1286 req = embedded_payload(wrb);
1288 switch (queue_type) {
1290 subsys = CMD_SUBSYSTEM_COMMON;
1291 opcode = OPCODE_COMMON_EQ_DESTROY;
1294 subsys = CMD_SUBSYSTEM_COMMON;
1295 opcode = OPCODE_COMMON_CQ_DESTROY;
1298 subsys = CMD_SUBSYSTEM_ETH;
1299 opcode = OPCODE_ETH_TX_DESTROY;
1302 subsys = CMD_SUBSYSTEM_ETH;
1303 opcode = OPCODE_ETH_RX_DESTROY;
1306 subsys = CMD_SUBSYSTEM_COMMON;
1307 opcode = OPCODE_COMMON_MCC_DESTROY;
1313 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1315 req->id = cpu_to_le16(q->id);
1317 status = be_mbox_notify_wait(adapter);
1320 mutex_unlock(&adapter->mbox_lock);
1325 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1327 struct be_mcc_wrb *wrb;
1328 struct be_cmd_req_q_destroy *req;
1331 spin_lock_bh(&adapter->mcc_lock);
1333 wrb = wrb_from_mccq(adapter);
1338 req = embedded_payload(wrb);
1340 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1341 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1342 req->id = cpu_to_le16(q->id);
1344 status = be_mcc_notify_wait(adapter);
1348 spin_unlock_bh(&adapter->mcc_lock);
1352 /* Create an rx filtering policy configuration on an i/f
1353 * Will use MBOX only if MCCQ has not been created.
1355 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1356 u32 *if_handle, u32 domain)
1358 struct be_mcc_wrb wrb = {0};
1359 struct be_cmd_req_if_create *req;
1362 req = embedded_payload(&wrb);
1363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1364 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
1365 req->hdr.domain = domain;
1366 req->capability_flags = cpu_to_le32(cap_flags);
1367 req->enable_flags = cpu_to_le32(en_flags);
1368 req->pmac_invalid = true;
1370 status = be_cmd_notify_wait(adapter, &wrb);
1372 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1373 *if_handle = le32_to_cpu(resp->interface_id);
1375 /* Hack to retrieve VF's pmac-id on BE3 */
1376 if (BE3_chip(adapter) && !be_physfn(adapter))
1377 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1383 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1385 struct be_mcc_wrb *wrb;
1386 struct be_cmd_req_if_destroy *req;
1389 if (interface_id == -1)
1392 spin_lock_bh(&adapter->mcc_lock);
1394 wrb = wrb_from_mccq(adapter);
1399 req = embedded_payload(wrb);
1401 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1402 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1403 req->hdr.domain = domain;
1404 req->interface_id = cpu_to_le32(interface_id);
1406 status = be_mcc_notify_wait(adapter);
1408 spin_unlock_bh(&adapter->mcc_lock);
1412 /* Get stats is a non-embedded command: the request is not embedded inside
1413  * the WRB but is a separate DMA memory block.
1414 * Uses asynchronous MCC
1416 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1418 struct be_mcc_wrb *wrb;
1419 struct be_cmd_req_hdr *hdr;
1422 spin_lock_bh(&adapter->mcc_lock);
1424 wrb = wrb_from_mccq(adapter);
1429 hdr = nonemb_cmd->va;
1431 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1432 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1434 	/* version 1 of the cmd is supported by all chips except BE2 */
1435 if (!BE2_chip(adapter))
1438 be_mcc_notify(adapter);
1439 adapter->stats_cmd_sent = true;
1442 spin_unlock_bh(&adapter->mcc_lock);
1447 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1448 struct be_dma_mem *nonemb_cmd)
1451 struct be_mcc_wrb *wrb;
1452 struct lancer_cmd_req_pport_stats *req;
1455 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1459 spin_lock_bh(&adapter->mcc_lock);
1461 wrb = wrb_from_mccq(adapter);
1466 req = nonemb_cmd->va;
1468 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1469 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1472 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1473 req->cmd_params.params.reset_stats = 0;
1475 be_mcc_notify(adapter);
1476 adapter->stats_cmd_sent = true;
1479 spin_unlock_bh(&adapter->mcc_lock);
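/* Convert the firmware PHY_LINK_SPEED_* encoding into a speed in Mbps */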
1483 static int be_mac_to_link_speed(int mac_speed)
1485 switch (mac_speed) {
1486 case PHY_LINK_SPEED_ZERO:
1488 case PHY_LINK_SPEED_10MBPS:
1490 case PHY_LINK_SPEED_100MBPS:
1492 case PHY_LINK_SPEED_1GBPS:
1494 case PHY_LINK_SPEED_10GBPS:
1496 case PHY_LINK_SPEED_20GBPS:
1498 case PHY_LINK_SPEED_25GBPS:
1500 case PHY_LINK_SPEED_40GBPS:
1506 /* Uses synchronous mcc
1507 * Returns link_speed in Mbps
1509 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1510 u8 *link_status, u32 dom)
1512 struct be_mcc_wrb *wrb;
1513 struct be_cmd_req_link_status *req;
1516 spin_lock_bh(&adapter->mcc_lock);
1519 *link_status = LINK_DOWN;
1521 wrb = wrb_from_mccq(adapter);
1526 req = embedded_payload(wrb);
1528 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1529 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1531 	/* version 1 of the cmd is supported by all chips except BE2 */
1532 if (!BE2_chip(adapter))
1533 req->hdr.version = 1;
1535 req->hdr.domain = dom;
1537 status = be_mcc_notify_wait(adapter);
1539 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1541 *link_speed = resp->link_speed ?
1542 le16_to_cpu(resp->link_speed) * 10 :
1543 be_mac_to_link_speed(resp->mac_speed);
1545 if (!resp->logical_link_status)
1549 *link_status = resp->logical_link_status;
1553 spin_unlock_bh(&adapter->mcc_lock);
1557 /* Uses asynchronous mcc; the die temperature is recorded by the MCC completion handler */
1558 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1560 struct be_mcc_wrb *wrb;
1561 struct be_cmd_req_get_cntl_addnl_attribs *req;
1564 spin_lock_bh(&adapter->mcc_lock);
1566 wrb = wrb_from_mccq(adapter);
1571 req = embedded_payload(wrb);
1573 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1574 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1577 be_mcc_notify(adapter);
1580 spin_unlock_bh(&adapter->mcc_lock);
1584 /* Uses synchronous mcc */
1585 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1587 struct be_mcc_wrb *wrb;
1588 struct be_cmd_req_get_fat *req;
1591 spin_lock_bh(&adapter->mcc_lock);
1593 wrb = wrb_from_mccq(adapter);
1598 req = embedded_payload(wrb);
1600 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1601 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1602 req->fat_operation = cpu_to_le32(QUERY_FAT);
1603 status = be_mcc_notify_wait(adapter);
1605 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1606 if (log_size && resp->log_size)
1607 *log_size = le32_to_cpu(resp->log_size) -
1611 spin_unlock_bh(&adapter->mcc_lock);
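/* Retrieve the adapter's FAT log into 'buf', fetching it from firmware in
 * chunks of at most 60KB per RETRIEVE_FAT command.
 */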
1615 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1617 struct be_dma_mem get_fat_cmd;
1618 struct be_mcc_wrb *wrb;
1619 struct be_cmd_req_get_fat *req;
1620 u32 offset = 0, total_size, buf_size,
1621 log_offset = sizeof(u32), payload_len;
1627 total_size = buf_len;
1629 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1630 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1633 if (!get_fat_cmd.va) {
1635 dev_err(&adapter->pdev->dev,
1636 "Memory allocation failure while retrieving FAT data\n");
1640 spin_lock_bh(&adapter->mcc_lock);
1642 while (total_size) {
1643 buf_size = min(total_size, (u32)60*1024);
1644 total_size -= buf_size;
1646 wrb = wrb_from_mccq(adapter);
1651 req = get_fat_cmd.va;
1653 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1654 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1655 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1658 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1659 req->read_log_offset = cpu_to_le32(log_offset);
1660 req->read_log_length = cpu_to_le32(buf_size);
1661 req->data_buffer_size = cpu_to_le32(buf_size);
1663 status = be_mcc_notify_wait(adapter);
1665 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1666 memcpy(buf + offset,
1668 le32_to_cpu(resp->read_log_length));
1670 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1674 log_offset += buf_size;
1677 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1680 spin_unlock_bh(&adapter->mcc_lock);
1683 /* Uses synchronous mcc */
1684 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1687 struct be_mcc_wrb *wrb;
1688 struct be_cmd_req_get_fw_version *req;
1691 spin_lock_bh(&adapter->mcc_lock);
1693 wrb = wrb_from_mccq(adapter);
1699 req = embedded_payload(wrb);
1701 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1702 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1703 status = be_mcc_notify_wait(adapter);
1705 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1706 strcpy(fw_ver, resp->firmware_version_string);
1708 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1711 spin_unlock_bh(&adapter->mcc_lock);
1715 /* Set the delay interval of an EQ to the specified value
1718 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1720 struct be_mcc_wrb *wrb;
1721 struct be_cmd_req_modify_eq_delay *req;
1724 spin_lock_bh(&adapter->mcc_lock);
1726 wrb = wrb_from_mccq(adapter);
1731 req = embedded_payload(wrb);
1733 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1734 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1736 req->num_eq = cpu_to_le32(1);
1737 req->delay[0].eq_id = cpu_to_le32(eq_id);
1738 req->delay[0].phase = 0;
1739 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1741 be_mcc_notify(adapter);
1744 spin_unlock_bh(&adapter->mcc_lock);
1748 /* Uses synchronous mcc */
1749 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1750 u32 num, bool untagged, bool promiscuous)
1752 struct be_mcc_wrb *wrb;
1753 struct be_cmd_req_vlan_config *req;
1756 spin_lock_bh(&adapter->mcc_lock);
1758 wrb = wrb_from_mccq(adapter);
1763 req = embedded_payload(wrb);
1765 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1766 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1768 req->interface_id = if_id;
1769 req->promiscuous = promiscuous;
1770 req->untagged = untagged;
1771 req->num_vlan = num;
1773 memcpy(req->normal_vlan, vtag_array,
1774 req->num_vlan * sizeof(vtag_array[0]));
1777 status = be_mcc_notify_wait(adapter);
1780 spin_unlock_bh(&adapter->mcc_lock);
1784 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1786 struct be_mcc_wrb *wrb;
1787 struct be_dma_mem *mem = &adapter->rx_filter;
1788 struct be_cmd_req_rx_filter *req = mem->va;
1791 spin_lock_bh(&adapter->mcc_lock);
1793 wrb = wrb_from_mccq(adapter);
1798 memset(req, 0, sizeof(*req));
1799 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1800 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1803 req->if_id = cpu_to_le32(adapter->if_handle);
1804 if (flags & IFF_PROMISC) {
1805 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1806 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1807 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1809 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1810 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1811 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1812 } else if (flags & IFF_ALLMULTI) {
1813 req->if_flags_mask = req->if_flags =
1814 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1816 struct netdev_hw_addr *ha;
1819 req->if_flags_mask = req->if_flags =
1820 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1822 		/* Reset mcast promisc mode if it was already set, by setting the mask
1823 		 * but not the flags field
1825 req->if_flags_mask |=
1826 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1827 be_if_cap_flags(adapter));
1828 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1829 netdev_for_each_mc_addr(ha, adapter->netdev)
1830 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1833 status = be_mcc_notify_wait(adapter);
1835 spin_unlock_bh(&adapter->mcc_lock);
1839 /* Uses synchronous mcc */
1840 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1842 struct be_mcc_wrb *wrb;
1843 struct be_cmd_req_set_flow_control *req;
1846 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1847 CMD_SUBSYSTEM_COMMON))
1850 spin_lock_bh(&adapter->mcc_lock);
1852 wrb = wrb_from_mccq(adapter);
1857 req = embedded_payload(wrb);
1859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1860 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1862 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1863 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1865 status = be_mcc_notify_wait(adapter);
1868 spin_unlock_bh(&adapter->mcc_lock);
1873 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1875 struct be_mcc_wrb *wrb;
1876 struct be_cmd_req_get_flow_control *req;
1879 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1880 CMD_SUBSYSTEM_COMMON))
1883 spin_lock_bh(&adapter->mcc_lock);
1885 wrb = wrb_from_mccq(adapter);
1890 req = embedded_payload(wrb);
1892 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1893 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1895 status = be_mcc_notify_wait(adapter);
1897 struct be_cmd_resp_get_flow_control *resp =
1898 embedded_payload(wrb);
1899 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1900 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1904 spin_unlock_bh(&adapter->mcc_lock);
1909 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1910 u32 *mode, u32 *caps, u16 *asic_rev)
1912 struct be_mcc_wrb *wrb;
1913 struct be_cmd_req_query_fw_cfg *req;
1916 if (mutex_lock_interruptible(&adapter->mbox_lock))
1919 wrb = wrb_from_mbox(adapter);
1920 req = embedded_payload(wrb);
1922 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1923 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1925 status = be_mbox_notify_wait(adapter);
1927 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1928 *port_num = le32_to_cpu(resp->phys_port);
1929 *mode = le32_to_cpu(resp->function_mode);
1930 *caps = le32_to_cpu(resp->function_caps);
1931 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
1934 mutex_unlock(&adapter->mbox_lock);
1939 int be_cmd_reset_function(struct be_adapter *adapter)
1941 struct be_mcc_wrb *wrb;
1942 struct be_cmd_req_hdr *req;
1945 if (lancer_chip(adapter)) {
1946 status = lancer_wait_ready(adapter);
1948 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1949 adapter->db + SLIPORT_CONTROL_OFFSET);
1950 status = lancer_test_and_set_rdy_state(adapter);
1953 dev_err(&adapter->pdev->dev,
1954 "Adapter in non recoverable error\n");
1959 if (mutex_lock_interruptible(&adapter->mbox_lock))
1962 wrb = wrb_from_mbox(adapter);
1963 req = embedded_payload(wrb);
1965 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1966 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1968 status = be_mbox_notify_wait(adapter);
1970 mutex_unlock(&adapter->mbox_lock);
1974 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1975 u32 rss_hash_opts, u16 table_size)
1977 struct be_mcc_wrb *wrb;
1978 struct be_cmd_req_rss_config *req;
1979 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1980 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1981 0x3ea83c02, 0x4a110304};
1984 if (mutex_lock_interruptible(&adapter->mbox_lock))
1987 wrb = wrb_from_mbox(adapter);
1988 req = embedded_payload(wrb);
1990 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1991 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1993 req->if_id = cpu_to_le32(adapter->if_handle);
1994 req->enable_rss = cpu_to_le16(rss_hash_opts);
1995 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1997 if (lancer_chip(adapter) || skyhawk_chip(adapter))
1998 req->hdr.version = 1;
2000 memcpy(req->cpu_table, rsstable, table_size);
2001 memcpy(req->hash, myhash, sizeof(myhash));
2002 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2004 status = be_mbox_notify_wait(adapter);
2006 mutex_unlock(&adapter->mbox_lock);
2011 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2012 u8 bcn, u8 sts, u8 state)
2014 struct be_mcc_wrb *wrb;
2015 struct be_cmd_req_enable_disable_beacon *req;
2018 spin_lock_bh(&adapter->mcc_lock);
2020 wrb = wrb_from_mccq(adapter);
2025 req = embedded_payload(wrb);
2027 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2028 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
2030 req->port_num = port_num;
2031 req->beacon_state = state;
2032 req->beacon_duration = bcn;
2033 req->status_duration = sts;
2035 status = be_mcc_notify_wait(adapter);
2038 spin_unlock_bh(&adapter->mcc_lock);
2043 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2045 struct be_mcc_wrb *wrb;
2046 struct be_cmd_req_get_beacon_state *req;
2049 spin_lock_bh(&adapter->mcc_lock);
2051 wrb = wrb_from_mccq(adapter);
2056 req = embedded_payload(wrb);
2058 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2059 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
2061 req->port_num = port_num;
2063 status = be_mcc_notify_wait(adapter);
2065 struct be_cmd_resp_get_beacon_state *resp =
2066 embedded_payload(wrb);
2067 *state = resp->beacon_state;
2071 spin_unlock_bh(&adapter->mcc_lock);
2075 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2076 u32 data_size, u32 data_offset,
2077 const char *obj_name, u32 *data_written,
2078 u8 *change_status, u8 *addn_status)
2080 struct be_mcc_wrb *wrb;
2081 struct lancer_cmd_req_write_object *req;
2082 struct lancer_cmd_resp_write_object *resp;
2086 spin_lock_bh(&adapter->mcc_lock);
2087 adapter->flash_status = 0;
2089 wrb = wrb_from_mccq(adapter);
2095 req = embedded_payload(wrb);
2097 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2098 OPCODE_COMMON_WRITE_OBJECT,
2099 sizeof(struct lancer_cmd_req_write_object), wrb,
2102 ctxt = &req->context;
2103 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2104 write_length, ctxt, data_size);
2107 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2110 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2113 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2114 req->write_offset = cpu_to_le32(data_offset);
2115 strcpy(req->object_name, obj_name);
2116 req->descriptor_count = cpu_to_le32(1);
2117 req->buf_len = cpu_to_le32(data_size);
2118 req->addr_low = cpu_to_le32((cmd->dma +
2119 sizeof(struct lancer_cmd_req_write_object))
2121 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2122 sizeof(struct lancer_cmd_req_write_object)));
2124 be_mcc_notify(adapter);
2125 spin_unlock_bh(&adapter->mcc_lock);
2127 if (!wait_for_completion_timeout(&adapter->flash_compl,
2128 msecs_to_jiffies(60000)))
2131 status = adapter->flash_status;
2133 resp = embedded_payload(wrb);
2135 *data_written = le32_to_cpu(resp->actual_write_len);
2136 *change_status = resp->change_status;
2138 *addn_status = resp->additional_status;
2144 spin_unlock_bh(&adapter->mcc_lock);
2148 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2149 u32 data_size, u32 data_offset, const char *obj_name,
2150 u32 *data_read, u32 *eof, u8 *addn_status)
2152 struct be_mcc_wrb *wrb;
2153 struct lancer_cmd_req_read_object *req;
2154 struct lancer_cmd_resp_read_object *resp;
2157 spin_lock_bh(&adapter->mcc_lock);
2159 wrb = wrb_from_mccq(adapter);
2165 req = embedded_payload(wrb);
2167 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2168 OPCODE_COMMON_READ_OBJECT,
2169 sizeof(struct lancer_cmd_req_read_object), wrb,
2172 req->desired_read_len = cpu_to_le32(data_size);
2173 req->read_offset = cpu_to_le32(data_offset);
2174 strcpy(req->object_name, obj_name);
2175 req->descriptor_count = cpu_to_le32(1);
2176 req->buf_len = cpu_to_le32(data_size);
2177 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2178 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2180 status = be_mcc_notify_wait(adapter);
2182 resp = embedded_payload(wrb);
2184 *data_read = le32_to_cpu(resp->actual_read_len);
2185 *eof = le32_to_cpu(resp->eof);
2187 *addn_status = resp->additional_status;
2191 spin_unlock_bh(&adapter->mcc_lock);
2195 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2196 u32 flash_type, u32 flash_opcode, u32 buf_size)
2198 struct be_mcc_wrb *wrb;
2199 struct be_cmd_write_flashrom *req;
2202 spin_lock_bh(&adapter->mcc_lock);
2203 adapter->flash_status = 0;
2205 wrb = wrb_from_mccq(adapter);
2212 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2213 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2215 req->params.op_type = cpu_to_le32(flash_type);
2216 req->params.op_code = cpu_to_le32(flash_opcode);
2217 req->params.data_buf_size = cpu_to_le32(buf_size);
2219 be_mcc_notify(adapter);
2220 spin_unlock_bh(&adapter->mcc_lock);
2222 if (!wait_for_completion_timeout(&adapter->flash_compl,
2223 msecs_to_jiffies(40000)))
2226 status = adapter->flash_status;
2231 spin_unlock_bh(&adapter->mcc_lock);
2235 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2238 struct be_mcc_wrb *wrb;
2239 struct be_cmd_read_flash_crc *req;
2242 spin_lock_bh(&adapter->mcc_lock);
2244 wrb = wrb_from_mccq(adapter);
2249 req = embedded_payload(wrb);
2251 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2252 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2255 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2256 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2257 req->params.offset = cpu_to_le32(offset);
2258 req->params.data_buf_size = cpu_to_le32(0x4);
2260 status = be_mcc_notify_wait(adapter);
2262 memcpy(flashed_crc, req->crc, 4);
2265 spin_unlock_bh(&adapter->mcc_lock);
2269 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2270 struct be_dma_mem *nonemb_cmd)
2272 struct be_mcc_wrb *wrb;
2273 struct be_cmd_req_acpi_wol_magic_config *req;
2276 spin_lock_bh(&adapter->mcc_lock);
2278 wrb = wrb_from_mccq(adapter);
2283 req = nonemb_cmd->va;
2285 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2286 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2288 memcpy(req->magic_mac, mac, ETH_ALEN);
2290 status = be_mcc_notify_wait(adapter);
2293 spin_unlock_bh(&adapter->mcc_lock);
2297 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2298 u8 loopback_type, u8 enable)
2300 struct be_mcc_wrb *wrb;
2301 struct be_cmd_req_set_lmode *req;
2304 spin_lock_bh(&adapter->mcc_lock);
2306 wrb = wrb_from_mccq(adapter);
2312 req = embedded_payload(wrb);
2314 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2315 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2318 req->src_port = port_num;
2319 req->dest_port = port_num;
2320 req->loopback_type = loopback_type;
2321 req->loopback_state = enable;
2323 status = be_mcc_notify_wait(adapter);
2325 spin_unlock_bh(&adapter->mcc_lock);
2329 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2330 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2332 struct be_mcc_wrb *wrb;
2333 struct be_cmd_req_loopback_test *req;
2336 spin_lock_bh(&adapter->mcc_lock);
2338 wrb = wrb_from_mccq(adapter);
2344 req = embedded_payload(wrb);
2346 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2347 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2348 req->hdr.timeout = cpu_to_le32(4);
2350 req->pattern = cpu_to_le64(pattern);
2351 req->src_port = cpu_to_le32(port_num);
2352 req->dest_port = cpu_to_le32(port_num);
2353 req->pkt_size = cpu_to_le32(pkt_size);
2354 req->num_pkts = cpu_to_le32(num_pkts);
2355 req->loopback_type = cpu_to_le32(loopback_type);
2357 status = be_mcc_notify_wait(adapter);
2359 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2360 status = le32_to_cpu(resp->status);
2364 spin_unlock_bh(&adapter->mcc_lock);
2368 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2369 u32 byte_cnt, struct be_dma_mem *cmd)
2371 struct be_mcc_wrb *wrb;
2372 struct be_cmd_req_ddrdma_test *req;
2376 spin_lock_bh(&adapter->mcc_lock);
2378 wrb = wrb_from_mccq(adapter);
2384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2385 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2387 req->pattern = cpu_to_le64(pattern);
2388 req->byte_count = cpu_to_le32(byte_cnt);
2389 for (i = 0; i < byte_cnt; i++) {
2390 req->snd_buff[i] = (u8)(pattern >> (j*8));
2396 status = be_mcc_notify_wait(adapter);
2399 struct be_cmd_resp_ddrdma_test *resp;
2401 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2408 spin_unlock_bh(&adapter->mcc_lock);
2412 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2413 struct be_dma_mem *nonemb_cmd)
2415 struct be_mcc_wrb *wrb;
2416 struct be_cmd_req_seeprom_read *req;
2419 spin_lock_bh(&adapter->mcc_lock);
2421 wrb = wrb_from_mccq(adapter);
2426 req = nonemb_cmd->va;
2428 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2429 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2432 status = be_mcc_notify_wait(adapter);
2435 spin_unlock_bh(&adapter->mcc_lock);
2439 int be_cmd_get_phy_info(struct be_adapter *adapter)
2441 struct be_mcc_wrb *wrb;
2442 struct be_cmd_req_get_phy_info *req;
2443 struct be_dma_mem cmd;
2446 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2447 CMD_SUBSYSTEM_COMMON))
2450 spin_lock_bh(&adapter->mcc_lock);
2452 wrb = wrb_from_mccq(adapter);
2457 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2458 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2461 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2468 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2469 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2472 status = be_mcc_notify_wait(adapter);
2474 struct be_phy_info *resp_phy_info =
2475 cmd.va + sizeof(struct be_cmd_req_hdr);
2476 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2477 adapter->phy.interface_type =
2478 le16_to_cpu(resp_phy_info->interface_type);
2479 adapter->phy.auto_speeds_supported =
2480 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2481 adapter->phy.fixed_speeds_supported =
2482 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2483 adapter->phy.misc_params =
2484 le32_to_cpu(resp_phy_info->misc_params);
2486 if (BE2_chip(adapter)) {
2487 adapter->phy.fixed_speeds_supported =
2488 BE_SUPPORTED_SPEED_10GBPS |
2489 BE_SUPPORTED_SPEED_1GBPS;
2492 pci_free_consistent(adapter->pdev, cmd.size,
2495 spin_unlock_bh(&adapter->mcc_lock);
2499 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2501 struct be_mcc_wrb *wrb;
2502 struct be_cmd_req_set_qos *req;
2505 spin_lock_bh(&adapter->mcc_lock);
2507 wrb = wrb_from_mccq(adapter);
2513 req = embedded_payload(wrb);
2515 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2516 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2518 req->hdr.domain = domain;
2519 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2520 req->max_bps_nic = cpu_to_le32(bps);
2522 status = be_mcc_notify_wait(adapter);
2525 spin_unlock_bh(&adapter->mcc_lock);
2529 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2531 struct be_mcc_wrb *wrb;
2532 struct be_cmd_req_cntl_attribs *req;
2533 struct be_cmd_resp_cntl_attribs *resp;
2535 int payload_len = max(sizeof(*req), sizeof(*resp));
2536 struct mgmt_controller_attrib *attribs;
2537 struct be_dma_mem attribs_cmd;
2539 if (mutex_lock_interruptible(&adapter->mbox_lock))
2542 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2543 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2544 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2546 if (!attribs_cmd.va) {
2547 dev_err(&adapter->pdev->dev,
2548 "Memory allocation failure\n");
2553 wrb = wrb_from_mbox(adapter);
2558 req = attribs_cmd.va;
2560 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2561 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2564 status = be_mbox_notify_wait(adapter);
2566 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2567 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2571 mutex_unlock(&adapter->mbox_lock);
2573 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2574 attribs_cmd.va, attribs_cmd.dma);
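/* Request the BE3 native ERX API; warns if the adapter remains in legacy (non-native) mode */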
2579 int be_cmd_req_native_mode(struct be_adapter *adapter)
2581 struct be_mcc_wrb *wrb;
2582 struct be_cmd_req_set_func_cap *req;
2585 if (mutex_lock_interruptible(&adapter->mbox_lock))
2588 wrb = wrb_from_mbox(adapter);
2594 req = embedded_payload(wrb);
2596 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2597 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2599 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2600 CAPABILITY_BE3_NATIVE_ERX_API);
2601 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2603 status = be_mbox_notify_wait(adapter);
2605 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2606 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2607 CAPABILITY_BE3_NATIVE_ERX_API;
2608 if (!adapter->be3_native)
2609 dev_warn(&adapter->pdev->dev,
2610 "adapter not in advanced mode\n");
2613 mutex_unlock(&adapter->mbox_lock);
2617 /* Get privilege(s) for a function */
2618 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2621 struct be_mcc_wrb *wrb;
2622 struct be_cmd_req_get_fn_privileges *req;
2625 spin_lock_bh(&adapter->mcc_lock);
2627 wrb = wrb_from_mccq(adapter);
2633 req = embedded_payload(wrb);
2635 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2636 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2639 req->hdr.domain = domain;
2641 status = be_mcc_notify_wait(adapter);
2643 struct be_cmd_resp_get_fn_privileges *resp =
2644 embedded_payload(wrb);
2645 *privilege = le32_to_cpu(resp->privilege_mask);
2649 spin_unlock_bh(&adapter->mcc_lock);
2653 /* Set privilege(s) for a function */
2654 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2657 struct be_mcc_wrb *wrb;
2658 struct be_cmd_req_set_fn_privileges *req;
2661 spin_lock_bh(&adapter->mcc_lock);
2663 wrb = wrb_from_mccq(adapter);
2669 req = embedded_payload(wrb);
2670 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2671 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2673 req->hdr.domain = domain;
2674 if (lancer_chip(adapter))
2675 req->privileges_lancer = cpu_to_le32(privileges);
2677 req->privileges = cpu_to_le32(privileges);
2679 status = be_mcc_notify_wait(adapter);
2681 spin_unlock_bh(&adapter->mcc_lock);
2685 /* pmac_id_valid: true  => pmac_id is supplied and its MAC address is requested.
2686  * pmac_id_valid: false => an active pmac_id or a MAC address is requested.
2687  *                         If a pmac_id is returned, pmac_id_valid is set to true
2689 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2690 bool *pmac_id_valid, u32 *pmac_id, u8 domain)
2692 struct be_mcc_wrb *wrb;
2693 struct be_cmd_req_get_mac_list *req;
2696 struct be_dma_mem get_mac_list_cmd;
2699 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2700 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2701 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2702 get_mac_list_cmd.size,
2703 &get_mac_list_cmd.dma);
2705 if (!get_mac_list_cmd.va) {
2706 dev_err(&adapter->pdev->dev,
2707 "Memory allocation failure during GET_MAC_LIST\n");
2711 spin_lock_bh(&adapter->mcc_lock);
2713 wrb = wrb_from_mccq(adapter);
2719 req = get_mac_list_cmd.va;
2721 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2722 OPCODE_COMMON_GET_MAC_LIST,
2723 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2724 req->hdr.domain = domain;
2725 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2726 if (*pmac_id_valid) {
2727 req->mac_id = cpu_to_le32(*pmac_id);
2728 req->iface_id = cpu_to_le16(adapter->if_handle);
2729 req->perm_override = 0;
2731 req->perm_override = 1;
2734 status = be_mcc_notify_wait(adapter);
2736 struct be_cmd_resp_get_mac_list *resp =
2737 get_mac_list_cmd.va;
2739 if (*pmac_id_valid) {
2740 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2745 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2746 /* The MAC list returned could contain one or more active mac_ids
2747  * or one or more true or pseudo permanent MAC addresses.
2748  * If an active mac_id is present, return the first active mac_id
2751 for (i = 0; i < mac_count; i++) {
2752 struct get_list_macaddr *mac_entry;
2756 mac_entry = &resp->macaddr_list[i];
2757 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2758 /* mac_id is a 32 bit value and mac_addr size
2761 if (mac_addr_size == sizeof(u32)) {
2762 *pmac_id_valid = true;
2763 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2764 *pmac_id = le32_to_cpu(mac_id);
2768 /* If no active mac_id found, return first mac addr */
2769 *pmac_id_valid = false;
2770 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2775 spin_unlock_bh(&adapter->mcc_lock);
2776 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2777 get_mac_list_cmd.va, get_mac_list_cmd.dma);
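/* Return the currently active MAC address: BEx chips use MAC_ADDR_QUERY, other chips
 * look it up in the MAC list by pmac_id
 */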
2781 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
2785 if (BEx_chip(adapter))
2786 return be_cmd_mac_addr_query(adapter, mac, false,
2787 adapter->if_handle, curr_pmac_id);
2789 /* Fetch the MAC address using pmac_id */
2790 return be_cmd_get_mac_from_list(adapter, mac, &active,
2794 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2797 bool pmac_valid = false;
2799 memset(mac, 0, ETH_ALEN);
2801 if (BEx_chip(adapter)) {
2802 if (be_physfn(adapter))
2803 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2806 status = be_cmd_mac_addr_query(adapter, mac, false,
2807 adapter->if_handle, 0);
2809 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
2816 /* Uses synchronous MCCQ */
2817 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2818 u8 mac_count, u32 domain)
2820 struct be_mcc_wrb *wrb;
2821 struct be_cmd_req_set_mac_list *req;
2823 struct be_dma_mem cmd;
2825 memset(&cmd, 0, sizeof(struct be_dma_mem));
2826 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2827 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2828 &cmd.dma, GFP_KERNEL);
2832 spin_lock_bh(&adapter->mcc_lock);
2834 wrb = wrb_from_mccq(adapter);
2841 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2842 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2845 req->hdr.domain = domain;
2846 req->mac_count = mac_count;
2848 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2850 status = be_mcc_notify_wait(adapter);
2853 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2855 spin_unlock_bh(&adapter->mcc_lock);
2859 /* Wrapper to delete any active MACs and provision the new MAC.
2860 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2861 * current list are active.
2863 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2865 bool active_mac = false;
2866 u8 old_mac[ETH_ALEN];
2870 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
2872 if (!status && active_mac)
2873 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2875 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
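/* Set hypervisor-switch configuration: PVID and, on non-BEx chips, the port forwarding mode; uses the MCCQ */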
2878 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2879 u32 domain, u16 intf_id, u16 hsw_mode)
2881 struct be_mcc_wrb *wrb;
2882 struct be_cmd_req_set_hsw_config *req;
2886 spin_lock_bh(&adapter->mcc_lock);
2888 wrb = wrb_from_mccq(adapter);
2894 req = embedded_payload(wrb);
2895 ctxt = &req->context;
2897 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2898 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2900 req->hdr.domain = domain;
2901 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2903 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2904 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2906 if (!BEx_chip(adapter) && hsw_mode) {
2907 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
2908 ctxt, adapter->hba_port_num);
2909 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
2910 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
2914 be_dws_cpu_to_le(req->context, sizeof(req->context));
2915 status = be_mcc_notify_wait(adapter);
2918 spin_unlock_bh(&adapter->mcc_lock);
2922 /* Get Hyper switch config */
2923 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2924 u32 domain, u16 intf_id, u8 *mode)
2926 struct be_mcc_wrb *wrb;
2927 struct be_cmd_req_get_hsw_config *req;
2932 spin_lock_bh(&adapter->mcc_lock);
2934 wrb = wrb_from_mccq(adapter);
2940 req = embedded_payload(wrb);
2941 ctxt = &req->context;
2943 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2944 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2946 req->hdr.domain = domain;
2947 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2949 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2951 if (!BEx_chip(adapter)) {
2952 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2953 ctxt, adapter->hba_port_num);
2954 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
2956 be_dws_cpu_to_le(req->context, sizeof(req->context));
2958 status = be_mcc_notify_wait(adapter);
2960 struct be_cmd_resp_get_hsw_config *resp =
2961 embedded_payload(wrb);
2962 be_dws_le_to_cpu(&resp->context,
2963 sizeof(resp->context));
2964 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2965 pvid, &resp->context);
2967 *pvid = le16_to_cpu(vid);
2969 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2970 port_fwd_type, &resp->context);
2974 spin_unlock_bh(&adapter->mcc_lock);
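/* Query the ACPI Wake-on-LAN magic-packet capability (v1 command) over the mbox */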
2978 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2980 struct be_mcc_wrb *wrb;
2981 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2983 int payload_len = sizeof(*req);
2984 struct be_dma_mem cmd;
2986 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2990 if (mutex_lock_interruptible(&adapter->mbox_lock))
2993 memset(&cmd, 0, sizeof(struct be_dma_mem));
2994 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2995 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2998 dev_err(&adapter->pdev->dev,
2999 "Memory allocation failure\n");
3004 wrb = wrb_from_mbox(adapter);
3012 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3013 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3014 payload_len, wrb, &cmd);
3016 req->hdr.version = 1;
3017 req->query_options = BE_GET_WOL_CAP;
3019 status = be_mbox_notify_wait(adapter);
3021 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3022 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
3024 /* The command could succeed misleadingly on old f/w
3025  * that is not aware of the V1 version; fake an error. */
3026 if (resp->hdr.response_length < payload_len) {
3030 adapter->wol_cap = resp->wol_settings;
3033 mutex_unlock(&adapter->mbox_lock);
3035 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
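/* Retrieve the extended FAT capabilities into the caller-supplied DMA buffer; uses the mbox */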
3039 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3040 struct be_dma_mem *cmd)
3042 struct be_mcc_wrb *wrb;
3043 struct be_cmd_req_get_ext_fat_caps *req;
3046 if (mutex_lock_interruptible(&adapter->mbox_lock))
3049 wrb = wrb_from_mbox(adapter);
3056 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3057 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3058 cmd->size, wrb, cmd);
3059 req->parameter_type = cpu_to_le32(1);
3061 status = be_mbox_notify_wait(adapter);
3063 mutex_unlock(&adapter->mbox_lock);
3067 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3068 struct be_dma_mem *cmd,
3069 struct be_fat_conf_params *configs)
3071 struct be_mcc_wrb *wrb;
3072 struct be_cmd_req_set_ext_fat_caps *req;
3075 spin_lock_bh(&adapter->mcc_lock);
3077 wrb = wrb_from_mccq(adapter);
3084 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3085 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3086 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3087 cmd->size, wrb, cmd);
3089 status = be_mcc_notify_wait(adapter);
3091 spin_unlock_bh(&adapter->mcc_lock);
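/* Return the port name: derived from hba_port_num on non-Lancer chips, queried from FW on Lancer */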
3095 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3097 struct be_mcc_wrb *wrb;
3098 struct be_cmd_req_get_port_name *req;
3101 if (!lancer_chip(adapter)) {
3102 *port_name = adapter->hba_port_num + '0';
3106 spin_lock_bh(&adapter->mcc_lock);
3108 wrb = wrb_from_mccq(adapter);
3114 req = embedded_payload(wrb);
3116 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3117 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3119 req->hdr.version = 1;
3121 status = be_mcc_notify_wait(adapter);
3123 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3124 *port_name = resp->port_name[adapter->hba_port_num];
3126 *port_name = adapter->hba_port_num + '0';
3129 spin_unlock_bh(&adapter->mcc_lock);
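/* Walk the resource-descriptor list and return the first NIC descriptor found */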
3133 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
3135 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3138 for (i = 0; i < desc_count; i++) {
3139 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3140 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
3141 return (struct be_nic_res_desc *)hdr;
3143 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3144 hdr = (void *)hdr + hdr->desc_len;
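/* Walk the resource-descriptor list and return the PCIe descriptor matching the given PCI function */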
3149 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3152 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3153 struct be_pcie_res_desc *pcie;
3156 for (i = 0; i < desc_count; i++) {
3157 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3158 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3159 pcie = (struct be_pcie_res_desc *)hdr;
3160 if (pcie->pf_num == devfn)
3164 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3165 hdr = (void *)hdr + hdr->desc_len;
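/* Copy the resource limits advertised in a NIC descriptor into struct be_resources */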
3170 static void be_copy_nic_desc(struct be_resources *res,
3171 struct be_nic_res_desc *desc)
3173 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3174 res->max_vlans = le16_to_cpu(desc->vlan_count);
3175 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3176 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3177 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3178 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3179 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3180 /* Clear flags that the driver is not interested in */
3181 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3182 BE_IF_CAP_FLAGS_WANT;
3183 /* Need 1 RXQ as the default RXQ */
3184 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3185 res->max_rss_qs -= 1;
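/* Query this function's resource limits (GET_FUNC_CONFIG) over the mbox and copy them into *res */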
3189 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3191 struct be_mcc_wrb *wrb;
3192 struct be_cmd_req_get_func_config *req;
3194 struct be_dma_mem cmd;
3196 if (mutex_lock_interruptible(&adapter->mbox_lock))
3199 memset(&cmd, 0, sizeof(struct be_dma_mem));
3200 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3201 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3204 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3209 wrb = wrb_from_mbox(adapter);
3217 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3218 OPCODE_COMMON_GET_FUNC_CONFIG,
3219 cmd.size, wrb, &cmd);
3221 if (skyhawk_chip(adapter))
3222 req->hdr.version = 1;
3224 status = be_mbox_notify_wait(adapter);
3226 struct be_cmd_resp_get_func_config *resp = cmd.va;
3227 u32 desc_count = le32_to_cpu(resp->desc_count);
3228 struct be_nic_res_desc *desc;
3230 desc = be_get_nic_desc(resp->func_param, desc_count);
3236 adapter->pf_number = desc->pf_num;
3237 be_copy_nic_desc(res, desc);
3240 mutex_unlock(&adapter->mbox_lock);
3242 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3247 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3248 u8 domain, struct be_dma_mem *cmd)
3250 struct be_mcc_wrb *wrb;
3251 struct be_cmd_req_get_profile_config *req;
3254 if (mutex_lock_interruptible(&adapter->mbox_lock))
3256 wrb = wrb_from_mbox(adapter);
3259 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3260 OPCODE_COMMON_GET_PROFILE_CONFIG,
3261 cmd->size, wrb, cmd);
3263 req->type = ACTIVE_PROFILE_TYPE;
3264 req->hdr.domain = domain;
3265 if (!lancer_chip(adapter))
3266 req->hdr.version = 1;
3268 status = be_mbox_notify_wait(adapter);
3270 mutex_unlock(&adapter->mbox_lock);
3275 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3276 u8 domain, struct be_dma_mem *cmd)
3278 struct be_mcc_wrb *wrb;
3279 struct be_cmd_req_get_profile_config *req;
3282 spin_lock_bh(&adapter->mcc_lock);
3284 wrb = wrb_from_mccq(adapter);
3291 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3292 OPCODE_COMMON_GET_PROFILE_CONFIG,
3293 cmd->size, wrb, cmd);
3295 req->type = ACTIVE_PROFILE_TYPE;
3296 req->hdr.domain = domain;
3297 if (!lancer_chip(adapter))
3298 req->hdr.version = 1;
3300 status = be_mcc_notify_wait(adapter);
3303 spin_unlock_bh(&adapter->mcc_lock);
3307 /* Uses sync MCC if the MCC queue has already been created; otherwise uses the mbox */
3308 int be_cmd_get_profile_config(struct be_adapter *adapter,
3309 struct be_resources *res, u8 domain)
3311 struct be_cmd_resp_get_profile_config *resp;
3312 struct be_pcie_res_desc *pcie;
3313 struct be_nic_res_desc *nic;
3314 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3315 struct be_dma_mem cmd;
3319 memset(&cmd, 0, sizeof(struct be_dma_mem));
3320 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3321 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3326 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3328 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3333 desc_count = le32_to_cpu(resp->desc_count);
3335 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3338 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3340 nic = be_get_nic_desc(resp->func_param, desc_count);
3342 be_copy_nic_desc(res, nic);
3346 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3350 /* Currently only Lancer uses this command and it supports version 0 only
3353 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3356 struct be_mcc_wrb *wrb;
3357 struct be_cmd_req_set_profile_config *req;
3360 spin_lock_bh(&adapter->mcc_lock);
3362 wrb = wrb_from_mccq(adapter);
3368 req = embedded_payload(wrb);
3370 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3371 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3373 req->hdr.domain = domain;
3374 req->desc_count = cpu_to_le32(1);
3375 req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3376 req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3377 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3378 req->nic_desc.pf_num = adapter->pf_number;
3379 req->nic_desc.vf_num = domain;
3381 /* Mark fields invalid */
3382 req->nic_desc.unicast_mac_count = 0xFFFF;
3383 req->nic_desc.mcc_count = 0xFFFF;
3384 req->nic_desc.vlan_count = 0xFFFF;
3385 req->nic_desc.mcast_mac_count = 0xFFFF;
3386 req->nic_desc.txq_count = 0xFFFF;
3387 req->nic_desc.rq_count = 0xFFFF;
3388 req->nic_desc.rssq_count = 0xFFFF;
3389 req->nic_desc.lro_count = 0xFFFF;
3390 req->nic_desc.cq_count = 0xFFFF;
3391 req->nic_desc.toe_conn_count = 0xFFFF;
3392 req->nic_desc.eq_count = 0xFFFF;
3393 req->nic_desc.link_param = 0xFF;
3394 req->nic_desc.bw_min = 0xFFFFFFFF;
3395 req->nic_desc.acpi_params = 0xFF;
3396 req->nic_desc.wol_param = 0x0F;
3399 req->nic_desc.bw_min = cpu_to_le32(bps);
3400 req->nic_desc.bw_max = cpu_to_le32(bps);
3401 status = be_mcc_notify_wait(adapter);
3403 spin_unlock_bh(&adapter->mcc_lock);
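/* Look up the interface handle of the given VF via GET_IFACE_LIST; uses the MCCQ */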
3407 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3410 struct be_mcc_wrb *wrb;
3411 struct be_cmd_req_get_iface_list *req;
3412 struct be_cmd_resp_get_iface_list *resp;
3415 spin_lock_bh(&adapter->mcc_lock);
3417 wrb = wrb_from_mccq(adapter);
3422 req = embedded_payload(wrb);
3424 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3425 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3427 req->hdr.domain = vf_num + 1;
3429 status = be_mcc_notify_wait(adapter);
3431 resp = (struct be_cmd_resp_get_iface_list *)req;
3432 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3436 spin_unlock_bh(&adapter->mcc_lock);
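/* Poll the PHYSDEV control register until the in-progress bit clears,
 * or time out after SLIPORT_IDLE_TIMEOUT iterations
 */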
3440 static int lancer_wait_idle(struct be_adapter *adapter)
3442 #define SLIPORT_IDLE_TIMEOUT 30
3446 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3447 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3448 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3454 if (i == SLIPORT_IDLE_TIMEOUT)
3460 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3464 status = lancer_wait_idle(adapter);
3468 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3473 /* Check whether a dump image is present */
3474 bool dump_present(struct be_adapter *adapter)
3476 u32 sliport_status = 0;
3478 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3479 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3482 int lancer_initiate_dump(struct be_adapter *adapter)
3486 /* Trigger a firmware reset and a diagnostic dump */
3487 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3488 PHYSDEV_CONTROL_DD_MASK);
3490 dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3494 status = lancer_wait_idle(adapter);
3498 if (!dump_present(adapter)) {
3499 dev_err(&adapter->pdev->dev, "Dump image not present\n");
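/* Enable the given VF domain via ENABLE_DISABLE_VF; only issued on Lancer chips */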
3507 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3509 struct be_mcc_wrb *wrb;
3510 struct be_cmd_enable_disable_vf *req;
3513 if (!lancer_chip(adapter))
3516 spin_lock_bh(&adapter->mcc_lock);
3518 wrb = wrb_from_mccq(adapter);
3524 req = embedded_payload(wrb);
3526 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3527 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3530 req->hdr.domain = domain;
3532 status = be_mcc_notify_wait(adapter);
3534 spin_unlock_bh(&adapter->mcc_lock);
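/* Globally enable or disable interrupts for this function via the mbox */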
3538 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3540 struct be_mcc_wrb *wrb;
3541 struct be_cmd_req_intr_set *req;
3544 if (mutex_lock_interruptible(&adapter->mbox_lock))
3547 wrb = wrb_from_mbox(adapter);
3549 req = embedded_payload(wrb);
3551 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3552 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3555 req->intr_enabled = intr_enable;
3557 status = be_mbox_notify_wait(adapter);
3559 mutex_unlock(&adapter->mbox_lock);
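/* Pass-through MCC command submission on behalf of the RoCE driver: copies the caller's
 * WRB payload in, issues it, and copies the response back
 */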
3563 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3564 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3566 struct be_adapter *adapter = netdev_priv(netdev_handle);
3567 struct be_mcc_wrb *wrb;
3568 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3569 struct be_cmd_req_hdr *req;
3570 struct be_cmd_resp_hdr *resp;
3573 spin_lock_bh(&adapter->mcc_lock);
3575 wrb = wrb_from_mccq(adapter);
3580 req = embedded_payload(wrb);
3581 resp = embedded_payload(wrb);
3583 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3584 hdr->opcode, wrb_payload_size, wrb, NULL);
3585 memcpy(req, wrb_payload, wrb_payload_size);
3586 be_dws_cpu_to_le(req, wrb_payload_size);
3588 status = be_mcc_notify_wait(adapter);
3590 *cmd_status = (status & 0xffff);
3593 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3594 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3596 spin_unlock_bh(&adapter->mcc_lock);
3599 EXPORT_SYMBOL(be_roce_mcc_cmd);