/**
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"

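/*
 * MCC completion helpers: the controller marks a completion entry valid by
 * setting bits in 'flags'.  be_mcc_compl_is_new() byte-swaps and checks that
 * word, be_mcc_compl_use() clears it so the slot can be reused, and
 * be_mcc_compl_process() decodes the completion status and extended status
 * fields, reporting any firmware error.
 */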
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else
                return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}

static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                        CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                                CQE_STATUS_EXTD_MASK;
                dev_err(&ctrl->pdev->dev,
                        "error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
                return -1;
        }
        return 0;
}

static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE);
}

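/*
 * Ring the completion-queue doorbell.  The register word encodes the CQ id,
 * the number of entries the driver has consumed and, optionally, the re-arm
 * bit so the controller raises another event when new completions arrive.
 */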
void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
                       u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}

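/*
 * Poll the bootstrap mailbox doorbell until the firmware sets the ready bit.
 * The first ~50us are busy-waited in 5us steps; after that the poll backs off
 * to 2ms steps, and the whole wait is capped at roughly six seconds.
 */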
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        int cnt = 0, wait = 5;  /* in usecs */
        u32 ready;

        do {
                ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 6000000) {
                        dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
                        return -1;
                }

                if (cnt > 50) {
                        wait = long_delay;
                        mdelay(long_delay / 1000);
                } else
                        udelay(wait);
                cnt += wait;
        } while (true);
        return 0;
}

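/*
 * Issue the command currently sitting in the bootstrap mailbox.  The mailbox
 * physical address is handed to the controller in two doorbell writes (the
 * upper address bits first, then the lower bits), each followed by a wait for
 * the ready bit.  Once the firmware has executed the command it posts a
 * completion into the same mailbox, which is decoded by
 * be_mcc_compl_process().
 */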
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
        int status;
        u32 val = 0;
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0) {
                SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
                return status;
        }
        val = 0;
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val &= ~MPU_MAILBOX_DB_HI_MASK;
        val |= (u32) (mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0) {
                SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
                return status;
        }
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status) {
                        SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
                        return status;
                }
        } else {
                dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}

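/*
 * Helpers that fill in the generic parts of a firmware request: the WRB
 * header (embedded flag or SGE count plus payload length) and the command
 * header (subsystem, opcode and request length).  Every mailbox command in
 * this file follows the same pattern, e.g. in beiscsi_cmd_eq_create() below:
 *
 *      be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 *      be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 *                         OPCODE_COMMON_EQ_CREATE, sizeof(*req));
 */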
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                        u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

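/*
 * Convert a be_dma_mem buffer into the list of 4K page addresses that the
 * firmware queue-create commands expect, splitting each page's 64-bit bus
 * address into little-endian lo/hi halves.
 */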
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                                                        struct be_dma_mem *mem)
{
        int i, buf_pages;
        u64 dma = (u64) mem->dma;

        buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}

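/*
 * Convert an interrupt-coalescing delay in microseconds into the EQ
 * 'delaymult' encoding.  The value scales the distance from the maximum
 * supported interrupt rate and is clamped to 10 bits.  For example, a 64us
 * delay gives an interrupt rate of 15625/s and a multiplier of
 * (651042 - 15625) / 15625 ~= 40.7, which rounds to 41.
 */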
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        multiplier = (multiplier + round / 2) / round;
                        multiplier = min(multiplier, (u32) 1023);
                }
        }
        return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

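/*
 * Create an event queue.  The EQ context (owning PCI function, entry count,
 * delay multiplier) is packed into the embedded request, the queue pages are
 * attached and, on success, the firmware-assigned id is stored in eq->id.
 * A caller would typically do something like the following (a sketch only;
 * 'phba' and 'eq_delay' are assumptions, not names from this file):
 *
 *      if (beiscsi_cmd_eq_create(&phba->ctrl, eq, eq_delay))
 *              goto create_eq_error;
 */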
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                                                PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                                        __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

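/*
 * Tell the firmware that the driver is coming up.  The request is the
 * special signature pattern (FF 12 34 FF FF 56 78 FF) written directly into
 * the mailbox WRB, which apparently lets the firmware detect the host's
 * byte order.
 */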
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        int status;
        u8 *endian_check;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        endian_check = (u8 *) wrb;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x12;
        *endian_check++ = 0x34;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x56;
        *endian_check++ = 0x78;
        *endian_check++ = 0xFF;
        be_dws_cpu_to_le(wrb, sizeof(*wrb));

        status = be_mbox_notify(ctrl);
        if (status)
                SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

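/*
 * Create a completion queue bound to the given event queue.  The CQ context
 * carries the coalescing watermark, no-delay flag, encoded entry count,
 * solicited-event and eventable bits, and the parent EQ id; on success the
 * firmware-assigned id is stored in cq->id.
 */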
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        if (!q_mem->va)
                SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                      __ilog2_u32(cq->len / 256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                      PCI_FUNC(ctrl->pdev->devfn));
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        } else
                SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=0x%08x\n",
                        status);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

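/*
 * Encode a ring length for the firmware: fls() yields log2(len) + 1, so a
 * 256-entry ring encodes as 9; an encoding of 16 (a 32K-entry ring) wraps to
 * 0, presumably to fit the hardware field.
 */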
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len);   /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
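
/*
 * Destroy a queue of any type.  The queue type selects the subsystem and
 * opcode; SGL "queues" have no id, so the id field is only set for real
 * rings.  Called with q == NULL and QTYPE_SGL to undo a partial SGL page
 * posting (see be_cmd_iscsi_post_sgl_pages()).
 */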
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                          int queue_type)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
        u8 subsys = 0, opcode = 0;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_WRBQ:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
                break;
        case QTYPE_DPDUQ:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
                break;
        case QTYPE_SGL:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
                break;
        default:
                spin_unlock(&ctrl->mbox_lock);
                BUG();
                return -1;
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        if (queue_type != QTYPE_SGL)
                req->id = cpu_to_le16(q->id);

        status = be_mbox_notify(ctrl);

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

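/*
 * Read the adapter's MAC address from the iSCSI NIC configuration and copy
 * it into the caller's ETH_ALEN-byte buffer.
 */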
int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
                           sizeof(*req));

        status = be_mbox_notify(ctrl);
        if (!status) {
                struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);

                memcpy(mac_addr, resp->mac_address, ETH_ALEN);
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

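/*
 * Create a default PDU queue.  The context programs the owning PCI function,
 * the encoded ring size, the default buffer size and the CQ that receives
 * its completions; on success the firmware-assigned id is stored in dq->id.
 */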
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
                                    int entry_size)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
                      1);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
                      PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
                      be_encoded_q_len(length / sizeof(struct phys_addr)));
        AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
                      ctxt, entry_size);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
                      cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                struct be_defq_create_resp *resp = embedded_payload(wrb);

                dq->id = le16_to_cpu(resp->id);
                dq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

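/*
 * Create a WRB (work request block) queue; on success the firmware returns
 * the connection id (cid), which is stored as the queue id.
 */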
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
                       struct be_queue_info *wrbq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_wrbq_create_req *req = embedded_payload(wrb);
        struct be_wrbq_create_resp *resp = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status)
                wrbq->id = le16_to_cpu(resp->cid);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

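/*
 * Post the SGL pages backing iSCSI I/O to the firmware.  A single request
 * only holds a limited number of page addresses, so the buffer is posted in
 * chunks, advancing q_mem->dma and the page offset between mailbox commands.
 * A num_pages value of 0xff is treated as a single-page request but passed
 * through to the firmware unchanged; on any failure the pages posted so far
 * are torn down again via beiscsi_cmd_q_destroy(QTYPE_SGL).
 */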
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem,
                                u32 page_offset, u32 num_pages)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
        int status;
        unsigned int curr_pages;
        u32 internal_page_offset = 0;
        u32 temp_num_pages = num_pages;

        if (num_pages == 0xff)
                num_pages = 1;

        spin_lock(&ctrl->mbox_lock);
        do {
                memset(wrb, 0, sizeof(*wrb));
                be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
                be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                                   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
                                   sizeof(*req));
                curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
                                                pages);
                req->num_pages = min(num_pages, curr_pages);
                req->page_offset = page_offset;
                be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
                q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
                internal_page_offset += req->num_pages;
                page_offset += req->num_pages;
                num_pages -= req->num_pages;

                if (temp_num_pages == 0xff)
                        req->num_pages = temp_num_pages;

                status = be_mbox_notify(ctrl);
                if (status) {
                        SE_DEBUG(DBG_LVL_1,
                                 "FW CMD to map iscsi frags failed.\n");
                        goto error;
                }
        } while (num_pages > 0);
error:
        spin_unlock(&ctrl->mbox_lock);
        if (status != 0)
                beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
        return status;
}