]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/benet/be_cmds.c
be2net: memory barrier fixes on IBM p7 platform
[karo-tx-linux.git] / drivers / net / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
21 static void be_mcc_notify(struct be_adapter *adapter)
22 {
23         struct be_queue_info *mccq = &adapter->mcc_obj.q;
24         u32 val = 0;
25
26         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28
29         wmb();
30         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
31 }
32
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	/* A consumed entry has flags == 0 (reset in be_mcc_compl_use());
	 * any non-zero word means the hw has DMA'ed in a new completion.
	 */
	if (compl->flags != 0) {
		/* Swap in place; from here on flags is host endian */
		compl->flags = le32_to_cpu(compl->flags);
		/* NOTE(review): a non-zero flags word without the valid bit
		 * would indicate a corrupted CQE; treated as fatal here.
		 */
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}
46
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* Marks the entry consumed so be_mcc_compl_is_new() sees it as old */
	compl->flags = 0;
}
52
/* Process one MCC completion: swap it to host endian, signal a pending
 * flashrom writer, refresh netdev stats on a stats response, and warn on
 * any other command failure.  Returns the completion status code.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* Flashrom writes are long-running; their issuer sleeps on
	 * flash_compl instead of polling, so wake it here.
	 */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			/* Stats response lives in its own dma block, not in
			 * the wrb */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
88
89 /* Link state evt is a string of bytes; no need for endian swapping */
90 static void be_async_link_state_process(struct be_adapter *adapter,
91                 struct be_async_event_link_state *evt)
92 {
93         be_link_status_update(adapter,
94                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
95 }
96
97 static inline bool is_link_state_evt(u32 trailer)
98 {
99         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
100                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
101                                 ASYNC_EVENT_CODE_LINK_STATE);
102 }
103
104 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
105 {
106         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
107         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
108
109         if (be_mcc_compl_is_new(compl)) {
110                 queue_tail_inc(mcc_cq);
111                 return compl;
112         }
113         return NULL;
114 }
115
116 void be_async_mcc_enable(struct be_adapter *adapter)
117 {
118         spin_lock_bh(&adapter->mcc_cq_lock);
119
120         be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
121         adapter->mcc_obj.rearm_cq = true;
122
123         spin_unlock_bh(&adapter->mcc_cq_lock);
124 }
125
/* Stop re-arming the MCC CQ; once the currently armed notification fires,
 * no further CQ interrupts will be requested.
 */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
130
/* Drain the MCC CQ: dispatch async link-state events and process command
 * completions.  *status receives the status of the last command completion
 * seen (untouched if none).  Returns the number of CQ entries consumed so
 * the caller can re-arm the CQ by that count.
 */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	/* _bh: this path also runs from softirq context */
	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as a async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				*status = be_mcc_compl_process(adapter, compl);
				/* Only command completions consume a wrb slot;
				 * async events do not */
				atomic_dec(&mcc_obj->q.used);
		}
		/* Mark the CQ entry old so it is not seen again */
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
157
158 /* Wait till no more pending mcc requests are present */
159 static int be_mcc_wait_compl(struct be_adapter *adapter)
160 {
161 #define mcc_timeout             120000 /* 12s timeout */
162         int i, num, status = 0;
163         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
164
165         for (i = 0; i < mcc_timeout; i++) {
166                 num = be_process_mcc(adapter, &status);
167                 if (num)
168                         be_cq_notify(adapter, mcc_obj->cq.id,
169                                 mcc_obj->rearm_cq, num);
170
171                 if (atomic_read(&mcc_obj->q.used) == 0)
172                         break;
173                 udelay(100);
174         }
175         if (i == mcc_timeout) {
176                 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
177                 return -1;
178         }
179         return status;
180 }
181
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	/* Ring the MCCQ doorbell for the posted wrb(s), then poll until
	 * all outstanding requests have completed; returns the status of
	 * the last completion (or -1 on poll timeout).
	 */
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
188
189 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
190 {
191         int msecs = 0;
192         u32 ready;
193
194         do {
195                 ready = ioread32(db);
196                 if (ready == 0xffffffff) {
197                         dev_err(&adapter->pdev->dev,
198                                 "pci slot disconnected\n");
199                         return -1;
200                 }
201
202                 ready &= MPU_MAILBOX_DB_RDY_MASK;
203                 if (ready)
204                         break;
205
206                 if (msecs > 4000) {
207                         dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
208                         return -1;
209                 }
210
211                 set_current_state(TASK_INTERRUPTIBLE);
212                 schedule_timeout(msecs_to_jiffies(1));
213                 msecs++;
214         } while (true);
215
216         return 0;
217 }
218
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: post the high half of the mbox dma address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: post the low half; this write kicks off the command */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	/* Ready set again means the fw has finished executing the cmd */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		/* Reset the valid word so the entry is not re-processed */
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
268
269 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
270 {
271         u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
272
273         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
274         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
275                 return -1;
276         else
277                 return 0;
278 }
279
280 int be_cmd_POST(struct be_adapter *adapter)
281 {
282         u16 stage;
283         int status, timeout = 0;
284
285         do {
286                 status = be_POST_stage_get(adapter, &stage);
287                 if (status) {
288                         dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
289                                 stage);
290                         return -1;
291                 } else if (stage != POST_STAGE_ARMFW_RDY) {
292                         set_current_state(TASK_INTERRUPTIBLE);
293                         schedule_timeout(2 * HZ);
294                         timeout += 2;
295                 } else {
296                         return 0;
297                 }
298         } while (timeout < 40);
299
300         dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
301         return -1;
302 }
303
/* Payload area of a wrb whose request is embedded in the wrb itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
308
/* First scatter-gather entry of a wrb whose request lives in external
 * dma memory rather than in the wrb */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
313
314 /* Don't touch the hdr after it's prepared */
315 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
316                                 bool embedded, u8 sge_cnt, u32 opcode)
317 {
318         if (embedded)
319                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
320         else
321                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
322                                 MCC_WRB_SGE_CNT_SHIFT;
323         wrb->payload_length = payload_len;
324         wrb->tag0 = opcode;
325         be_dws_cpu_to_le(wrb, 8);
326 }
327
328 /* Don't touch the hdr after it's prepared */
329 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
330                                 u8 subsystem, u8 opcode, int cmd_len)
331 {
332         req_hdr->opcode = opcode;
333         req_hdr->subsystem = subsystem;
334         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
335         req_hdr->version = 0;
336 }
337
338 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
339                         struct be_dma_mem *mem)
340 {
341         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
342         u64 dma = (u64)mem->dma;
343
344         for (i = 0; i < buf_pages; i++) {
345                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
346                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
347                 dma += PAGE_SIZE_4K;
348         }
349 }
350
351 /* Converts interrupt delay in microseconds to multiplier value */
352 static u32 eq_delay_to_mult(u32 usec_delay)
353 {
354 #define MAX_INTR_RATE                   651042
355         const u32 round = 10;
356         u32 multiplier;
357
358         if (usec_delay == 0)
359                 multiplier = 0;
360         else {
361                 u32 interrupt_rate = 1000000 / usec_delay;
362                 /* Max delay, corresponding to the lowest interrupt rate */
363                 if (interrupt_rate == 0)
364                         multiplier = 1023;
365                 else {
366                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
367                         multiplier /= interrupt_rate;
368                         /* Round the multiplier to the closest value.*/
369                         multiplier = (multiplier + round/2) / round;
370                         multiplier = min(multiplier, (u32)1023);
371                 }
372         }
373         return multiplier;
374 }
375
376 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
377 {
378         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
379         struct be_mcc_wrb *wrb
380                 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
381         memset(wrb, 0, sizeof(*wrb));
382         return wrb;
383 }
384
385 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
386 {
387         struct be_queue_info *mccq = &adapter->mcc_obj.q;
388         struct be_mcc_wrb *wrb;
389
390         if (atomic_read(&mccq->used) >= mccq->len) {
391                 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
392                 return NULL;
393         }
394
395         wrb = queue_head_node(mccq);
396         queue_head_inc(mccq);
397         atomic_inc(&mccq->used);
398         memset(wrb, 0, sizeof(*wrb));
399         return wrb;
400 }
401
402 /* Tell fw we're about to start firing cmds by writing a
403  * special pattern across the wrb hdr; uses mbox
404  */
405 int be_cmd_fw_init(struct be_adapter *adapter)
406 {
407         u8 *wrb;
408         int status;
409
410         spin_lock(&adapter->mbox_lock);
411
412         wrb = (u8 *)wrb_from_mbox(adapter);
413         *wrb++ = 0xFF;
414         *wrb++ = 0x12;
415         *wrb++ = 0x34;
416         *wrb++ = 0xFF;
417         *wrb++ = 0xFF;
418         *wrb++ = 0x56;
419         *wrb++ = 0x78;
420         *wrb = 0xFF;
421
422         status = be_mbox_notify_wait(adapter);
423
424         spin_unlock(&adapter->mbox_lock);
425         return status;
426 }
427
428 /* Tell fw we're done with firing cmds by writing a
429  * special pattern across the wrb hdr; uses mbox
430  */
431 int be_cmd_fw_clean(struct be_adapter *adapter)
432 {
433         u8 *wrb;
434         int status;
435
436         if (adapter->eeh_err)
437                 return -EIO;
438
439         spin_lock(&adapter->mbox_lock);
440
441         wrb = (u8 *)wrb_from_mbox(adapter);
442         *wrb++ = 0xFF;
443         *wrb++ = 0xAA;
444         *wrb++ = 0xBB;
445         *wrb++ = 0xFF;
446         *wrb++ = 0xFF;
447         *wrb++ = 0xCC;
448         *wrb++ = 0xDD;
449         *wrb = 0xFF;
450
451         status = be_mbox_notify_wait(adapter);
452
453         spin_unlock(&adapter->mbox_lock);
454         return status;
455 }
/* Create an event queue with the given delay (in usecs) via the mailbox;
 * on success fills in eq->id and marks the queue created.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	/* Context must be swapped after all AMAP bit updates */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* Response overlays the request in the embedded payload */
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
497
/* Uses mbox */
/* Query a MAC address of the given type: either the permanent (factory)
 * mac or the currently programmed mac of interface if_handle.
 * On success copies ETH_ALEN bytes into mac_addr.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* if_id is only meaningful for a non-permanent query */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
534
/* Uses synchronous MCCQ */
/* Program mac_addr onto interface if_id; on success *pmac_id receives the
 * handle needed to later delete the address.  Returns -EBUSY when the
 * MCCQ is out of wrbs.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
571
/* Uses synchronous MCCQ */
/* Remove the mac address identified by pmac_id (as returned by
 * be_cmd_pmac_add) from interface if_id.
 */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCCQ full; caller may retry */
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
603
/* Uses Mbox */
/* Create a completion queue bound to event queue eq; on success fills in
 * cq->id and marks the queue created.
 */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	/* CQ comes up armed so the first completion raises an event */
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	/* Context must be swapped after all AMAP bit updates */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}
653
654 static u32 be_encoded_q_len(int q_len)
655 {
656         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
657         if (len_encoded == 16)
658                 len_encoded = 0;
659         return len_encoded;
660 }
661
/* Create the MCC queue bound to completion queue cq via the mailbox;
 * on success fills in mccq->id and marks the queue created.
 */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	/* Context must be swapped after all AMAP bit updates */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}
705
/* Create an ethernet tx queue bound to completion queue cq via the
 * mailbox; on success fills in txq->id and marks the queue created.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	/* NOTE(review): num_pages is not swapped here unlike the other
	 * queue-create cmds; presumably a u8 field — verify against the
	 * request layout in be_cmds.h.
	 */
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	/* Context must be swapped after all AMAP bit updates */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}
752
/* Uses mbox */
/* Create an ethernet rx queue on interface if_id, completing into cq_id;
 * on success fills in rxq->id and marks the queue created.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* frag_size is passed to the hw as its log2 */
	req->frag_size = fls(frag_size) - 1;
	/* NOTE(review): ring size hard-coded to 2 pages here — confirm this
	 * matches the rxq length allocated by the caller.
	 */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}
793
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	/* Device already dead (EEH); don't touch the hw */
	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	/* Map the queue type to the matching subsystem/opcode pair */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		/* Caller bug: unknown queue type */
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}
849
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
/* On success *if_handle receives the new interface id and, unless
 * pmac_invalid, *pmac_id receives the handle of the programmed mac.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	/* Program an initial mac only when the caller supplied one */
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
890
/* Uses mbox */
/* Destroy the interface identified by interface_id (as returned by
 * be_cmd_if_create).
 */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	/* Device already dead (EEH); don't touch the hw */
	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}
920
921 /* Get stats is a non embedded command: the request is not embedded inside
922  * WRB but is a separate dma memory block
923  * Uses asynchronous MCC
924  */
925 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
926 {
927         struct be_mcc_wrb *wrb;
928         struct be_cmd_req_get_stats *req;
929         struct be_sge *sge;
930         int status = 0;
931
932         spin_lock_bh(&adapter->mcc_lock);
933
934         wrb = wrb_from_mccq(adapter);
935         if (!wrb) {
936                 status = -EBUSY;
937                 goto err;
938         }
939         req = nonemb_cmd->va;
940         sge = nonembedded_sgl(wrb);
941
942         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
943                         OPCODE_ETH_GET_STATISTICS);
944
945         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
946                 OPCODE_ETH_GET_STATISTICS, sizeof(*req));
947         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
948         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
949         sge->len = cpu_to_le32(nonemb_cmd->size);
950
951         be_mcc_notify(adapter);
952
953 err:
954         spin_unlock_bh(&adapter->mcc_lock);
955         return status;
956 }
957
958 /* Uses synchronous mcc */
959 int be_cmd_link_status_query(struct be_adapter *adapter,
960                         bool *link_up, u8 *mac_speed, u16 *link_speed)
961 {
962         struct be_mcc_wrb *wrb;
963         struct be_cmd_req_link_status *req;
964         int status;
965
966         spin_lock_bh(&adapter->mcc_lock);
967
968         wrb = wrb_from_mccq(adapter);
969         if (!wrb) {
970                 status = -EBUSY;
971                 goto err;
972         }
973         req = embedded_payload(wrb);
974
975         *link_up = false;
976
977         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
978                         OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
979
980         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
981                 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
982
983         status = be_mcc_notify_wait(adapter);
984         if (!status) {
985                 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
986                 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
987                         *link_up = true;
988                         *link_speed = le16_to_cpu(resp->link_speed);
989                         *mac_speed = resp->mac_speed;
990                 }
991         }
992
993 err:
994         spin_unlock_bh(&adapter->mcc_lock);
995         return status;
996 }
997
998 /* Uses Mbox */
999 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1000 {
1001         struct be_mcc_wrb *wrb;
1002         struct be_cmd_req_get_fw_version *req;
1003         int status;
1004
1005         spin_lock(&adapter->mbox_lock);
1006
1007         wrb = wrb_from_mbox(adapter);
1008         req = embedded_payload(wrb);
1009
1010         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1011                         OPCODE_COMMON_GET_FW_VERSION);
1012
1013         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1014                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1015
1016         status = be_mbox_notify_wait(adapter);
1017         if (!status) {
1018                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1019                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1020         }
1021
1022         spin_unlock(&adapter->mbox_lock);
1023         return status;
1024 }
1025
1026 /* set the EQ delay interval of an EQ to specified value
1027  * Uses async mcc
1028  */
1029 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1030 {
1031         struct be_mcc_wrb *wrb;
1032         struct be_cmd_req_modify_eq_delay *req;
1033         int status = 0;
1034
1035         spin_lock_bh(&adapter->mcc_lock);
1036
1037         wrb = wrb_from_mccq(adapter);
1038         if (!wrb) {
1039                 status = -EBUSY;
1040                 goto err;
1041         }
1042         req = embedded_payload(wrb);
1043
1044         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1045                         OPCODE_COMMON_MODIFY_EQ_DELAY);
1046
1047         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1048                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
1049
1050         req->num_eq = cpu_to_le32(1);
1051         req->delay[0].eq_id = cpu_to_le32(eq_id);
1052         req->delay[0].phase = 0;
1053         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1054
1055         be_mcc_notify(adapter);
1056
1057 err:
1058         spin_unlock_bh(&adapter->mcc_lock);
1059         return status;
1060 }
1061
1062 /* Uses sycnhronous mcc */
1063 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1064                         u32 num, bool untagged, bool promiscuous)
1065 {
1066         struct be_mcc_wrb *wrb;
1067         struct be_cmd_req_vlan_config *req;
1068         int status;
1069
1070         spin_lock_bh(&adapter->mcc_lock);
1071
1072         wrb = wrb_from_mccq(adapter);
1073         if (!wrb) {
1074                 status = -EBUSY;
1075                 goto err;
1076         }
1077         req = embedded_payload(wrb);
1078
1079         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1080                         OPCODE_COMMON_NTWK_VLAN_CONFIG);
1081
1082         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1083                 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
1084
1085         req->interface_id = if_id;
1086         req->promiscuous = promiscuous;
1087         req->untagged = untagged;
1088         req->num_vlan = num;
1089         if (!promiscuous) {
1090                 memcpy(req->normal_vlan, vtag_array,
1091                         req->num_vlan * sizeof(vtag_array[0]));
1092         }
1093
1094         status = be_mcc_notify_wait(adapter);
1095
1096 err:
1097         spin_unlock_bh(&adapter->mcc_lock);
1098         return status;
1099 }
1100
1101 /* Uses MCC for this command as it may be called in BH context
1102  * Uses synchronous mcc
1103  */
1104 int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1105 {
1106         struct be_mcc_wrb *wrb;
1107         struct be_cmd_req_promiscuous_config *req;
1108         int status;
1109
1110         spin_lock_bh(&adapter->mcc_lock);
1111
1112         wrb = wrb_from_mccq(adapter);
1113         if (!wrb) {
1114                 status = -EBUSY;
1115                 goto err;
1116         }
1117         req = embedded_payload(wrb);
1118
1119         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
1120
1121         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1122                 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1123
1124         /* In FW versions X.102.149/X.101.487 and later,
1125          * the port setting associated only with the
1126          * issuing pci function will take effect
1127          */
1128         if (port_num)
1129                 req->port1_promiscuous = en;
1130         else
1131                 req->port0_promiscuous = en;
1132
1133         status = be_mcc_notify_wait(adapter);
1134
1135 err:
1136         spin_unlock_bh(&adapter->mcc_lock);
1137         return status;
1138 }
1139
1140 /*
1141  * Uses MCC for this command as it may be called in BH context
1142  * (mc == NULL) => multicast promiscous
1143  */
1144 int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1145                 struct net_device *netdev, struct be_dma_mem *mem)
1146 {
1147         struct be_mcc_wrb *wrb;
1148         struct be_cmd_req_mcast_mac_config *req = mem->va;
1149         struct be_sge *sge;
1150         int status;
1151
1152         spin_lock_bh(&adapter->mcc_lock);
1153
1154         wrb = wrb_from_mccq(adapter);
1155         if (!wrb) {
1156                 status = -EBUSY;
1157                 goto err;
1158         }
1159         sge = nonembedded_sgl(wrb);
1160         memset(req, 0, sizeof(*req));
1161
1162         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1163                         OPCODE_COMMON_NTWK_MULTICAST_SET);
1164         sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1165         sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1166         sge->len = cpu_to_le32(mem->size);
1167
1168         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1169                 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1170
1171         req->interface_id = if_id;
1172         if (netdev) {
1173                 int i;
1174                 struct netdev_hw_addr *ha;
1175
1176                 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1177
1178                 i = 0;
1179                 netdev_for_each_mc_addr(ha, netdev)
1180                         memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
1181         } else {
1182                 req->promiscuous = 1;
1183         }
1184
1185         status = be_mcc_notify_wait(adapter);
1186
1187 err:
1188         spin_unlock_bh(&adapter->mcc_lock);
1189         return status;
1190 }
1191
1192 /* Uses synchrounous mcc */
1193 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1194 {
1195         struct be_mcc_wrb *wrb;
1196         struct be_cmd_req_set_flow_control *req;
1197         int status;
1198
1199         spin_lock_bh(&adapter->mcc_lock);
1200
1201         wrb = wrb_from_mccq(adapter);
1202         if (!wrb) {
1203                 status = -EBUSY;
1204                 goto err;
1205         }
1206         req = embedded_payload(wrb);
1207
1208         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1209                         OPCODE_COMMON_SET_FLOW_CONTROL);
1210
1211         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1212                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1213
1214         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1215         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1216
1217         status = be_mcc_notify_wait(adapter);
1218
1219 err:
1220         spin_unlock_bh(&adapter->mcc_lock);
1221         return status;
1222 }
1223
1224 /* Uses sycn mcc */
1225 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1226 {
1227         struct be_mcc_wrb *wrb;
1228         struct be_cmd_req_get_flow_control *req;
1229         int status;
1230
1231         spin_lock_bh(&adapter->mcc_lock);
1232
1233         wrb = wrb_from_mccq(adapter);
1234         if (!wrb) {
1235                 status = -EBUSY;
1236                 goto err;
1237         }
1238         req = embedded_payload(wrb);
1239
1240         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1241                         OPCODE_COMMON_GET_FLOW_CONTROL);
1242
1243         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1244                 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
1245
1246         status = be_mcc_notify_wait(adapter);
1247         if (!status) {
1248                 struct be_cmd_resp_get_flow_control *resp =
1249                                                 embedded_payload(wrb);
1250                 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1251                 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1252         }
1253
1254 err:
1255         spin_unlock_bh(&adapter->mcc_lock);
1256         return status;
1257 }
1258
1259 /* Uses mbox */
1260 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1261 {
1262         struct be_mcc_wrb *wrb;
1263         struct be_cmd_req_query_fw_cfg *req;
1264         int status;
1265
1266         spin_lock(&adapter->mbox_lock);
1267
1268         wrb = wrb_from_mbox(adapter);
1269         req = embedded_payload(wrb);
1270
1271         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1272                         OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
1273
1274         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1275                 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1276
1277         status = be_mbox_notify_wait(adapter);
1278         if (!status) {
1279                 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1280                 *port_num = le32_to_cpu(resp->phys_port);
1281                 *cap = le32_to_cpu(resp->function_cap);
1282         }
1283
1284         spin_unlock(&adapter->mbox_lock);
1285         return status;
1286 }
1287
1288 /* Uses mbox */
1289 int be_cmd_reset_function(struct be_adapter *adapter)
1290 {
1291         struct be_mcc_wrb *wrb;
1292         struct be_cmd_req_hdr *req;
1293         int status;
1294
1295         spin_lock(&adapter->mbox_lock);
1296
1297         wrb = wrb_from_mbox(adapter);
1298         req = embedded_payload(wrb);
1299
1300         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1301                         OPCODE_COMMON_FUNCTION_RESET);
1302
1303         be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1304                 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1305
1306         status = be_mbox_notify_wait(adapter);
1307
1308         spin_unlock(&adapter->mbox_lock);
1309         return status;
1310 }
1311
1312 /* Uses sync mcc */
1313 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1314                         u8 bcn, u8 sts, u8 state)
1315 {
1316         struct be_mcc_wrb *wrb;
1317         struct be_cmd_req_enable_disable_beacon *req;
1318         int status;
1319
1320         spin_lock_bh(&adapter->mcc_lock);
1321
1322         wrb = wrb_from_mccq(adapter);
1323         if (!wrb) {
1324                 status = -EBUSY;
1325                 goto err;
1326         }
1327         req = embedded_payload(wrb);
1328
1329         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1330                         OPCODE_COMMON_ENABLE_DISABLE_BEACON);
1331
1332         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1333                 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1334
1335         req->port_num = port_num;
1336         req->beacon_state = state;
1337         req->beacon_duration = bcn;
1338         req->status_duration = sts;
1339
1340         status = be_mcc_notify_wait(adapter);
1341
1342 err:
1343         spin_unlock_bh(&adapter->mcc_lock);
1344         return status;
1345 }
1346
1347 /* Uses sync mcc */
1348 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1349 {
1350         struct be_mcc_wrb *wrb;
1351         struct be_cmd_req_get_beacon_state *req;
1352         int status;
1353
1354         spin_lock_bh(&adapter->mcc_lock);
1355
1356         wrb = wrb_from_mccq(adapter);
1357         if (!wrb) {
1358                 status = -EBUSY;
1359                 goto err;
1360         }
1361         req = embedded_payload(wrb);
1362
1363         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1364                         OPCODE_COMMON_GET_BEACON_STATE);
1365
1366         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1367                 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1368
1369         req->port_num = port_num;
1370
1371         status = be_mcc_notify_wait(adapter);
1372         if (!status) {
1373                 struct be_cmd_resp_get_beacon_state *resp =
1374                                                 embedded_payload(wrb);
1375                 *state = resp->beacon_state;
1376         }
1377
1378 err:
1379         spin_unlock_bh(&adapter->mcc_lock);
1380         return status;
1381 }
1382
1383 /* Uses sync mcc */
1384 int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
1385                                 u8 *connector)
1386 {
1387         struct be_mcc_wrb *wrb;
1388         struct be_cmd_req_port_type *req;
1389         int status;
1390
1391         spin_lock_bh(&adapter->mcc_lock);
1392
1393         wrb = wrb_from_mccq(adapter);
1394         if (!wrb) {
1395                 status = -EBUSY;
1396                 goto err;
1397         }
1398         req = embedded_payload(wrb);
1399
1400         be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
1401                         OPCODE_COMMON_READ_TRANSRECV_DATA);
1402
1403         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1404                 OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
1405
1406         req->port = cpu_to_le32(port);
1407         req->page_num = cpu_to_le32(TR_PAGE_A0);
1408         status = be_mcc_notify_wait(adapter);
1409         if (!status) {
1410                 struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
1411                         *connector = resp->data.connector;
1412         }
1413
1414 err:
1415         spin_unlock_bh(&adapter->mcc_lock);
1416         return status;
1417 }
1418
/* Write a firmware image segment to flash ROM via a non-embedded MCC
 * command; `cmd` is the DMA buffer holding both header and data.
 * Unlike the synchronous MCC commands, this one drops mcc_lock before
 * the notify completes and then sleeps on adapter->flash_compl, which
 * the MCC completion handler signals (flash can take several seconds).
 * Returns 0 on success, -EBUSY if no WRB is available, -1 on timeout,
 * otherwise the status reported by the completion handler.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	/* Reset under the lock, before the command can possibly complete */
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* tag1 lets the completion handler recognise this as a flash cmd */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	/* Single SGE pointing at the external DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	/* 12s budget: flash erase/program is slow. On timeout we cannot
	 * trust flash_status, so report a generic failure.
	 */
	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1467
1468 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1469                          int offset)
1470 {
1471         struct be_mcc_wrb *wrb;
1472         struct be_cmd_write_flashrom *req;
1473         int status;
1474
1475         spin_lock_bh(&adapter->mcc_lock);
1476
1477         wrb = wrb_from_mccq(adapter);
1478         if (!wrb) {
1479                 status = -EBUSY;
1480                 goto err;
1481         }
1482         req = embedded_payload(wrb);
1483
1484         be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
1485                         OPCODE_COMMON_READ_FLASHROM);
1486
1487         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1488                 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1489
1490         req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1491         req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1492         req->params.offset = cpu_to_le32(offset);
1493         req->params.data_buf_size = cpu_to_le32(0x4);
1494
1495         status = be_mcc_notify_wait(adapter);
1496         if (!status)
1497                 memcpy(flashed_crc, req->params.data_buf, 4);
1498
1499 err:
1500         spin_unlock_bh(&adapter->mcc_lock);
1501         return status;
1502 }
1503
1504 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1505                                 struct be_dma_mem *nonemb_cmd)
1506 {
1507         struct be_mcc_wrb *wrb;
1508         struct be_cmd_req_acpi_wol_magic_config *req;
1509         struct be_sge *sge;
1510         int status;
1511
1512         spin_lock_bh(&adapter->mcc_lock);
1513
1514         wrb = wrb_from_mccq(adapter);
1515         if (!wrb) {
1516                 status = -EBUSY;
1517                 goto err;
1518         }
1519         req = nonemb_cmd->va;
1520         sge = nonembedded_sgl(wrb);
1521
1522         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1523                         OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
1524
1525         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1526                 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
1527         memcpy(req->magic_mac, mac, ETH_ALEN);
1528
1529         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1530         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1531         sge->len = cpu_to_le32(nonemb_cmd->size);
1532
1533         status = be_mcc_notify_wait(adapter);
1534
1535 err:
1536         spin_unlock_bh(&adapter->mcc_lock);
1537         return status;
1538 }
1539
1540 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1541                         u8 loopback_type, u8 enable)
1542 {
1543         struct be_mcc_wrb *wrb;
1544         struct be_cmd_req_set_lmode *req;
1545         int status;
1546
1547         spin_lock_bh(&adapter->mcc_lock);
1548
1549         wrb = wrb_from_mccq(adapter);
1550         if (!wrb) {
1551                 status = -EBUSY;
1552                 goto err;
1553         }
1554
1555         req = embedded_payload(wrb);
1556
1557         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1558                                 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1559
1560         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1561                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1562                         sizeof(*req));
1563
1564         req->src_port = port_num;
1565         req->dest_port = port_num;
1566         req->loopback_type = loopback_type;
1567         req->loopback_state = enable;
1568
1569         status = be_mcc_notify_wait(adapter);
1570 err:
1571         spin_unlock_bh(&adapter->mcc_lock);
1572         return status;
1573 }
1574
1575 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1576                 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1577 {
1578         struct be_mcc_wrb *wrb;
1579         struct be_cmd_req_loopback_test *req;
1580         int status;
1581
1582         spin_lock_bh(&adapter->mcc_lock);
1583
1584         wrb = wrb_from_mccq(adapter);
1585         if (!wrb) {
1586                 status = -EBUSY;
1587                 goto err;
1588         }
1589
1590         req = embedded_payload(wrb);
1591
1592         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1593                                 OPCODE_LOWLEVEL_LOOPBACK_TEST);
1594
1595         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1596                         OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1597         req->hdr.timeout = cpu_to_le32(4);
1598
1599         req->pattern = cpu_to_le64(pattern);
1600         req->src_port = cpu_to_le32(port_num);
1601         req->dest_port = cpu_to_le32(port_num);
1602         req->pkt_size = cpu_to_le32(pkt_size);
1603         req->num_pkts = cpu_to_le32(num_pkts);
1604         req->loopback_type = cpu_to_le32(loopback_type);
1605
1606         status = be_mcc_notify_wait(adapter);
1607         if (!status) {
1608                 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
1609                 status = le32_to_cpu(resp->status);
1610         }
1611
1612 err:
1613         spin_unlock_bh(&adapter->mcc_lock);
1614         return status;
1615 }
1616
1617 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1618                                 u32 byte_cnt, struct be_dma_mem *cmd)
1619 {
1620         struct be_mcc_wrb *wrb;
1621         struct be_cmd_req_ddrdma_test *req;
1622         struct be_sge *sge;
1623         int status;
1624         int i, j = 0;
1625
1626         spin_lock_bh(&adapter->mcc_lock);
1627
1628         wrb = wrb_from_mccq(adapter);
1629         if (!wrb) {
1630                 status = -EBUSY;
1631                 goto err;
1632         }
1633         req = cmd->va;
1634         sge = nonembedded_sgl(wrb);
1635         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1636                                 OPCODE_LOWLEVEL_HOST_DDR_DMA);
1637         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1638                         OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
1639
1640         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1641         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1642         sge->len = cpu_to_le32(cmd->size);
1643
1644         req->pattern = cpu_to_le64(pattern);
1645         req->byte_count = cpu_to_le32(byte_cnt);
1646         for (i = 0; i < byte_cnt; i++) {
1647                 req->snd_buff[i] = (u8)(pattern >> (j*8));
1648                 j++;
1649                 if (j > 7)
1650                         j = 0;
1651         }
1652
1653         status = be_mcc_notify_wait(adapter);
1654
1655         if (!status) {
1656                 struct be_cmd_resp_ddrdma_test *resp;
1657                 resp = cmd->va;
1658                 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
1659                                 resp->snd_err) {
1660                         status = -1;
1661                 }
1662         }
1663
1664 err:
1665         spin_unlock_bh(&adapter->mcc_lock);
1666         return status;
1667 }
1668
1669 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1670                                 struct be_dma_mem *nonemb_cmd)
1671 {
1672         struct be_mcc_wrb *wrb;
1673         struct be_cmd_req_seeprom_read *req;
1674         struct be_sge *sge;
1675         int status;
1676
1677         spin_lock_bh(&adapter->mcc_lock);
1678
1679         wrb = wrb_from_mccq(adapter);
1680         req = nonemb_cmd->va;
1681         sge = nonembedded_sgl(wrb);
1682
1683         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1684                         OPCODE_COMMON_SEEPROM_READ);
1685
1686         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1687                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1688
1689         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1690         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1691         sge->len = cpu_to_le32(nonemb_cmd->size);
1692
1693         status = be_mcc_notify_wait(adapter);
1694
1695         spin_unlock_bh(&adapter->mcc_lock);
1696         return status;
1697 }