/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
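
/*
 * Post a firmware RES_WR with op RESET to release the hardware CQ, wait
 * for the firmware reply (or time out and flag a fatal device error), and
 * then free the host memory backing the queue and release the CQ ID.
 */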
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (u64)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret) {
                wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
                if (!wr_wait.done) {
                        printk(KERN_ERR MOD "Device %s not responding!\n",
                               pci_name(rdev->lldi.pdev));
                        rdev->flags = T4_FATAL_ERROR;
                        ret = -EIO;
                } else
                        ret = wr_wait.ret;
        }

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          pci_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}
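
/*
 * Allocate a CQ ID and host queue memory, then post a firmware RES_WR
 * with op WRITE to create the hardware CQ.  On failure the resources
 * already allocated are unwound through the err labels.
 */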
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        pci_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (u64)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        V_FW_RI_RES_WR_IQANUS(0) |
                        V_FW_RI_RES_WR_IQANUD(1) |
                        F_FW_RI_RES_WR_IQANDST |
                        V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        F_FW_RI_RES_WR_IQDROPRSS |
                        V_FW_RI_RES_WR_IQPCIECH(2) |
                        V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
                        F_FW_RI_RES_WR_IQO |
                        V_FW_RI_RES_WR_IQESIZE(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
        if (!wr_wait.done) {
                printk(KERN_ERR MOD "Device %s not responding!\n",
                       pci_name(rdev->lldi.pdev));
                rdev->flags = T4_FATAL_ERROR;
                ret = -EIO;
        } else
                ret = wr_wait.ret;
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;
        if (user) {
                cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                           (cq->cqid << rdev->cqshift);
                cq->ugts &= PAGE_MASK;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          pci_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}
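
/*
 * Insert a software-generated SWFLUSH completion for one outstanding RQ WR
 * into the software CQ.
 */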
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(FW_RI_SEND) |
                                 V_CQE_TYPE(0) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->rq.qid));
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}
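
/*
 * Flush the (in_use - count) RQ WRs still outstanding as SWFLUSH
 * completions and return how many were flushed.
 */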
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}
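
/*
 * Insert a software-generated SWFLUSH completion for one outstanding SQ WR
 * into the software CQ.
 */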
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(swcqe->opcode) |
                                 V_CQE_TYPE(1) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}
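
/*
 * Flush the (in_use - count) SQ WRs still outstanding as SWFLUSH
 * completions, wrapping around the software SQ, and return how many
 * were flushed.
 */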
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
        int in_use = wq->sq.in_use - count;

        BUG_ON(in_use < 0);
        while (in_use--) {
                swsqe->signaled = 0;
                insert_sq_cqe(wq, cq, swsqe);
                swsqe++;
                if (swsqe == (wq->sq.sw_sq + wq->sq.size))
                        swsqe = wq->sq.sw_sq;
                flushed++;
        }
        return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
        struct t4_cqe *cqe = NULL, *swcqe;
        int ret;

        PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
        ret = t4_next_hw_cqe(cq, &cqe);
        while (!ret) {
                PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
                     __func__, cq->cidx, cq->sw_pidx);
                swcqe = &cq->sw_queue[cq->sw_pidx];
                *swcqe = *cqe;
                swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
                t4_swcq_produce(cq);
                t4_hwcq_consume(cq);
                ret = t4_next_hw_cqe(cq, &cqe);
        }
}
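
/*
 * Return 1 if this CQE completes a WR on the given WQ, 0 if it should not
 * be counted: TERMINATE CQEs, RQ-type RDMA WRITE and SQ-type READ RESP
 * artifacts, and SEND completions that arrive when the RQ is already empty.
 */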
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;
        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;
        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;
        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}
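
/*
 * Count the completions sitting in the software CQ that belong to this
 * WQ's SQ.
 */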
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
                                      wq->sq.oldest_read)) &&
                    (CQE_QPID(cqe) == wq->sq.qid))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
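
/*
 * Count the completions sitting in the software CQ that belong to this
 * WQ's RQ and still complete a user-visible WR.
 */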
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
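
/*
 * Walk the software SQ from the consumer index, skipping unsignaled WRs,
 * and move the next completed, signaled CQE into the software CQ,
 * reclaiming any unsignaled WRs that precede it.
 */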
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        u16 ptr = wq->sq.cidx;
        int count = wq->sq.in_use;
        int unsignaled = 0;

        swsqe = &wq->sq.sw_sq[ptr];
        while (count--)
                if (!swsqe->signaled) {
                        if (++ptr == wq->sq.size)
                                ptr = 0;
                        swsqe = &wq->sq.sw_sq[ptr];
                        unsignaled++;
                } else if (swsqe->complete) {

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                             __func__, ptr, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->signaled = 0;
                        wq->sq.in_use -= unsignaled;
                        break;
                } else
                        break;
}
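
/*
 * Build a local SQ-type READ REQ CQE from the RQ-type READ RESP delivered
 * by the hardware, using the oldest outstanding read in the software SQ.
 */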
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
                                 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
                                 V_CQE_OPCODE(FW_RI_READ_REQ) |
                                 V_CQE_TYPE(1));
}

/*
 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set it
 * to NULL if there are no more outstanding reads.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0            CQE returned ok.
 *     -EAGAIN      CQE skipped, try again.
 *     -EOVERFLOW   CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /*
                 * If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (!wq->sq.oldest_read) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = t4_wq_in_error(wq);
                t4_set_wq_in_error(wq);
                goto proc_cqe;
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If its not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */
                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here its a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                t4_rq_consume(wq);
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0               cqe returned
 *      -ENODATA        EMPTY, no cqe returned
 *      -EAGAIN         caller must try again
 *      any other -errno fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe cqe = {0, 0}, *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);
        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {
                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}
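
/*
 * Poll up to num_entries completions off the CQ under chp->lock,
 * retrying any CQE that poll_cq asks us to skip (-EAGAIN).
 */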
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}
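
/*
 * Tear down a CQ: drop it from the cqid table, wait for all references to
 * go away, then destroy the hardware queue and free its memory.
 */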
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}
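
/*
 * Create a CQ for the verbs consumer.  Kernel CQs keep a software queue
 * for flush processing; user CQs additionally get two mmap keys, one for
 * the CQ memory and one for the GTS doorbell page.
 */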
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector, struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);
        memsize = entries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if its a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = entries;
        chp->cq.memsize = memsize;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = chp->cq.size;
        spin_lock_init(&chp->lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err3;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err4;
                }

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
             (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}
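
/*
 * CQ resize is not supported by this driver.
 */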
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}
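
/*
 * Request a completion notification on the CQ.  A missed-event indication
 * from t4_arm_cq is only propagated when the caller asked for
 * IB_CQ_REPORT_MISSED_EVENTS.
 */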
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}