1 /* This file is part of the Emulex RoCE Device Driver for
2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * EMULEX and SLI are trademarks of Emulex.
7 * This software is available to you under a choice of one of two licenses.
8 * You may choose to be licensed under the terms of the GNU General Public
9 * License (GPL) Version 2, available from the file COPYING in the main
10 * directory of this source tree, or the BSD license below:
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
16 * - Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Contact Information:
36 * linux-drivers@emulex.com
40 * Costa Mesa, CA 92626
43 #include <linux/dma-mapping.h>
44 #include <rdma/ib_verbs.h>
45 #include <rdma/ib_user_verbs.h>
46 #include <rdma/iw_cm.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_cache.h>
52 #include "ocrdma_hw.h"
53 #include "ocrdma_verbs.h"
54 #include <rdma/ocrdma-abi.h>
56 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
65 int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
66 int index, union ib_gid *sgid)
69 struct ocrdma_dev *dev;
71 dev = get_ocrdma_dev(ibdev);
72 memset(sgid, 0, sizeof(*sgid));
73 if (index >= OCRDMA_MAX_SGID)
76 ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
78 memcpy(sgid, &zgid, sizeof(*sgid));
85 int ocrdma_add_gid(struct ib_device *device,
88 const union ib_gid *gid,
89 const struct ib_gid_attr *attr,
94 int ocrdma_del_gid(struct ib_device *device,
101 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
102 struct ib_udata *uhw)
104 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
106 if (uhw->inlen || uhw->outlen)
109 memset(attr, 0, sizeof *attr);
110 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
111 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
112 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
113 attr->max_mr_size = dev->attr.max_mr_size;
114 attr->page_size_cap = 0xffff000;
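/*
 * page_size_cap is a bitmask of supported page sizes: bit n set means
 * pages of 2^n bytes are supported. 0xffff000 has bits 12..27 set,
 * i.e. every power-of-two size from 4KB (1 << 12) to 128MB (1 << 27).
 */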
115 attr->vendor_id = dev->nic_info.pdev->vendor;
116 attr->vendor_part_id = dev->nic_info.pdev->device;
117 attr->hw_ver = dev->asic_id;
118 attr->max_qp = dev->attr.max_qp;
119 attr->max_ah = OCRDMA_MAX_AH;
120 attr->max_qp_wr = dev->attr.max_wqe;
122 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
123 IB_DEVICE_RC_RNR_NAK_GEN |
124 IB_DEVICE_SHUTDOWN_PORT |
125 IB_DEVICE_SYS_IMAGE_GUID |
126 IB_DEVICE_LOCAL_DMA_LKEY |
127 IB_DEVICE_MEM_MGT_EXTENSIONS;
128 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
129 attr->max_sge_rd = dev->attr.max_rdma_sge;
130 attr->max_cq = dev->attr.max_cq;
131 attr->max_cqe = dev->attr.max_cqe;
132 attr->max_mr = dev->attr.max_mr;
133 attr->max_mw = dev->attr.max_mw;
134 attr->max_pd = dev->attr.max_pd;
135 attr->atomic_cap = 0;
137 attr->max_map_per_fmr = 0;
138 attr->max_qp_rd_atom =
139 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
140 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
141 attr->max_srq = dev->attr.max_srq;
142 attr->max_srq_sge = dev->attr.max_srq_sge;
143 attr->max_srq_wr = dev->attr.max_rqe;
144 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
145 attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
150 struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
152 struct ocrdma_dev *dev;
153 struct net_device *ndev = NULL;
157 dev = get_ocrdma_dev(ibdev);
159 ndev = dev->nic_info.netdev;
168 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
169 u8 *ib_speed, u8 *ib_width)
174 status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
176 speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
179 case OCRDMA_PHYS_LINK_SPEED_1GBPS:
180 *ib_speed = IB_SPEED_SDR;
181 *ib_width = IB_WIDTH_1X;
184 case OCRDMA_PHYS_LINK_SPEED_10GBPS:
185 *ib_speed = IB_SPEED_QDR;
186 *ib_width = IB_WIDTH_1X;
189 case OCRDMA_PHYS_LINK_SPEED_20GBPS:
190 *ib_speed = IB_SPEED_DDR;
191 *ib_width = IB_WIDTH_4X;
194 case OCRDMA_PHYS_LINK_SPEED_40GBPS:
195 *ib_speed = IB_SPEED_QDR;
196 *ib_width = IB_WIDTH_4X;
201 *ib_speed = IB_SPEED_SDR;
202 *ib_width = IB_WIDTH_1X;
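/*
 * The mapping above expresses each PHY speed as an IB (speed, width)
 * pair whose product gives the link rate: SDR is 2.5Gbps per lane,
 * DDR 5Gbps and QDR 10Gbps, so 10G maps to QDR x1, 20G to DDR x4 and
 * 40G to QDR x4. 1G has no exact IB equivalent and is reported as
 * SDR x1, which is also the fallback for unknown speeds.
 */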
206 int ocrdma_query_port(struct ib_device *ibdev,
207 u8 port, struct ib_port_attr *props)
209 enum ib_port_state port_state;
210 struct ocrdma_dev *dev;
211 struct net_device *netdev;
213 /* props is zeroed by the caller; avoid zeroing it here */
214 dev = get_ocrdma_dev(ibdev);
216 pr_err("%s(%d) invalid_port=0x%x\n", __func__,
220 netdev = dev->nic_info.netdev;
221 if (netif_running(netdev) && netif_oper_up(netdev)) {
222 port_state = IB_PORT_ACTIVE;
223 props->phys_state = 5; /* 5: LinkUp */
225 port_state = IB_PORT_DOWN;
226 props->phys_state = 3; /* 3: Disabled */
228 props->max_mtu = IB_MTU_4096;
229 props->active_mtu = iboe_get_mtu(netdev->mtu);
234 props->state = port_state;
235 props->port_cap_flags =
238 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
239 IB_PORT_IP_BASED_GIDS;
240 props->gid_tbl_len = OCRDMA_MAX_SGID;
241 props->pkey_tbl_len = 1;
242 props->bad_pkey_cntr = 0;
243 props->qkey_viol_cntr = 0;
244 get_link_speed_and_width(dev, &props->active_speed,
245 &props->active_width);
246 props->max_msg_sz = 0x80000000;
247 props->max_vl_num = 4;
251 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
252 struct ib_port_modify *props)
254 struct ocrdma_dev *dev;
256 dev = get_ocrdma_dev(ibdev);
258 pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
264 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
267 struct ocrdma_mm *mm;
269 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
272 mm->key.phy_addr = phy_addr;
274 INIT_LIST_HEAD(&mm->entry);
276 mutex_lock(&uctx->mm_list_lock);
277 list_add_tail(&mm->entry, &uctx->mm_head);
278 mutex_unlock(&uctx->mm_list_lock);
282 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
285 struct ocrdma_mm *mm, *tmp;
287 mutex_lock(&uctx->mm_list_lock);
288 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
289 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
292 list_del(&mm->entry);
296 mutex_unlock(&uctx->mm_list_lock);
299 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
303 struct ocrdma_mm *mm;
305 mutex_lock(&uctx->mm_list_lock);
306 list_for_each_entry(mm, &uctx->mm_head, entry) {
307 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
313 mutex_unlock(&uctx->mm_list_lock);
318 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
320 u16 pd_bitmap_idx = 0;
321 const unsigned long *pd_bitmap;
324 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
325 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
326 dev->pd_mgr->max_dpp_pd);
327 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
328 dev->pd_mgr->pd_dpp_count++;
329 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
330 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
332 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
333 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
334 dev->pd_mgr->max_normal_pd);
335 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
336 dev->pd_mgr->pd_norm_count++;
337 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
338 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
340 return pd_bitmap_idx;
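/*
 * Illustrative walk-through of the allocator above (hypothetical
 * values): with bits 0..4 of pd_norm_bitmap already set,
 * find_first_zero_bit() returns 5 and __set_bit() claims it; the
 * caller then forms the PD id as pd_norm_start + 5. The pd_*_thrsh
 * fields track the high-water mark of simultaneously allocated PDs.
 */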
343 static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
349 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
350 dev->pd_mgr->pd_norm_count;
355 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
356 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
359 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
360 dev->pd_mgr->pd_dpp_count--;
363 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
364 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
367 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
368 dev->pd_mgr->pd_norm_count--;
375 static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
380 mutex_lock(&dev->dev_lock);
381 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
382 mutex_unlock(&dev->dev_lock);
386 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
391 mutex_lock(&dev->dev_lock);
392 if (pd->dpp_enabled) {
393 /* try allocating a DPP PD; if none is available, fall back to a normal PD */
394 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
395 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
396 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
397 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
398 } else if (dev->pd_mgr->pd_norm_count <
399 dev->pd_mgr->max_normal_pd) {
400 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
401 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
402 pd->dpp_enabled = false;
407 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
408 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
409 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
414 mutex_unlock(&dev->dev_lock);
418 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
419 struct ocrdma_ucontext *uctx,
420 struct ib_udata *udata)
422 struct ocrdma_pd *pd = NULL;
425 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
427 return ERR_PTR(-ENOMEM);
429 if (udata && uctx && dev->attr.max_dpp_pds) {
431 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
433 pd->dpp_enabled ? (dev->nic_info.db_page_size /
434 dev->attr.wqe_size) : 0;
437 if (dev->pd_mgr->pd_prealloc_valid) {
438 status = ocrdma_get_pd_num(dev, pd);
443 return ERR_PTR(status);
448 status = ocrdma_mbx_alloc_pd(dev, pd);
450 if (pd->dpp_enabled) {
451 pd->dpp_enabled = false;
456 return ERR_PTR(status);
463 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
464 struct ocrdma_pd *pd)
466 return (uctx->cntxt_pd == pd);
469 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
470 struct ocrdma_pd *pd)
474 if (dev->pd_mgr->pd_prealloc_valid)
475 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
477 status = ocrdma_mbx_dealloc_pd(dev, pd);
483 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
484 struct ocrdma_ucontext *uctx,
485 struct ib_udata *udata)
489 uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
490 if (IS_ERR(uctx->cntxt_pd)) {
491 status = PTR_ERR(uctx->cntxt_pd);
492 uctx->cntxt_pd = NULL;
496 uctx->cntxt_pd->uctx = uctx;
497 uctx->cntxt_pd->ibpd.device = &dev->ibdev;
502 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
504 struct ocrdma_pd *pd = uctx->cntxt_pd;
505 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
507 if (uctx->pd_in_use) {
508 pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
509 __func__, dev->id, pd->id);
511 uctx->cntxt_pd = NULL;
512 (void)_ocrdma_dealloc_pd(dev, pd);
516 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
518 struct ocrdma_pd *pd = NULL;
520 mutex_lock(&uctx->mm_list_lock);
521 if (!uctx->pd_in_use) {
522 uctx->pd_in_use = true;
525 mutex_unlock(&uctx->mm_list_lock);
530 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
532 mutex_lock(&uctx->mm_list_lock);
533 uctx->pd_in_use = false;
534 mutex_unlock(&uctx->mm_list_lock);
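/*
 * ocrdma_get_ucontext_pd()/ocrdma_release_ucontext_pd() implement a
 * single-owner handoff of the per-context PD: one PD is pre-allocated
 * when the ucontext is created, the first ocrdma_alloc_pd() on that
 * context claims it (pd_in_use = true) instead of allocating a new
 * one, and dealloc releases the claim rather than destroying the PD.
 */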
537 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
538 struct ib_udata *udata)
541 struct ocrdma_ucontext *ctx;
542 struct ocrdma_alloc_ucontext_resp resp;
543 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
544 struct pci_dev *pdev = dev->nic_info.pdev;
545 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
548 return ERR_PTR(-EFAULT);
549 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
551 return ERR_PTR(-ENOMEM);
552 INIT_LIST_HEAD(&ctx->mm_head);
553 mutex_init(&ctx->mm_list_lock);
555 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
556 &ctx->ah_tbl.pa, GFP_KERNEL);
557 if (!ctx->ah_tbl.va) {
559 return ERR_PTR(-ENOMEM);
561 memset(ctx->ah_tbl.va, 0, map_len);
562 ctx->ah_tbl.len = map_len;
564 memset(&resp, 0, sizeof(resp));
565 resp.ah_tbl_len = ctx->ah_tbl.len;
566 resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
568 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
572 status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
576 resp.dev_id = dev->id;
577 resp.max_inline_data = dev->attr.max_inline_data;
578 resp.wqe_size = dev->attr.wqe_size;
579 resp.rqe_size = dev->attr.rqe_size;
580 resp.dpp_wqe_size = dev->attr.wqe_size;
582 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
583 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
586 return &ctx->ibucontext;
590 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
592 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
595 return ERR_PTR(status);
598 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
601 struct ocrdma_mm *mm, *tmp;
602 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
603 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
604 struct pci_dev *pdev = dev->nic_info.pdev;
606 status = ocrdma_dealloc_ucontext_pd(uctx);
608 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
609 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
612 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
613 list_del(&mm->entry);
620 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
622 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
623 struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
624 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
625 u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
626 unsigned long len = (vma->vm_end - vma->vm_start);
630 if (vma->vm_start & (PAGE_SIZE - 1))
632 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
636 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
637 dev->nic_info.db_total_size)) &&
638 (len <= dev->nic_info.db_page_size)) {
639 if (vma->vm_flags & VM_READ)
642 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
643 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
644 len, vma->vm_page_prot);
645 } else if (dev->nic_info.dpp_unmapped_len &&
646 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
647 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
648 dev->nic_info.dpp_unmapped_len)) &&
649 (len <= dev->nic_info.dpp_unmapped_len)) {
650 if (vma->vm_flags & VM_READ)
653 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
654 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
655 len, vma->vm_page_prot);
657 status = remap_pfn_range(vma, vma->vm_start,
658 vma->vm_pgoff, len, vma->vm_page_prot);
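/*
 * The mmap contract: the driver hands physical addresses (doorbell
 * page, DPP page, queue memory) to user space in the uresp structures
 * and records each (addr, len) pair via ocrdma_add_mmap(); user space
 * mmap()s the resource back using that address as the file offset,
 * which ocrdma_search_mmap() validates above before remapping. A
 * minimal user-space sketch (hypothetical fd/resp names):
 *
 *	db = mmap(NULL, resp.db_page_size, PROT_WRITE, MAP_SHARED,
 *		  cmd_fd, resp.db_page_addr);
 */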
663 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
664 struct ib_ucontext *ib_ctx,
665 struct ib_udata *udata)
669 u64 dpp_page_addr = 0;
671 struct ocrdma_alloc_pd_uresp rsp;
672 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
674 memset(&rsp, 0, sizeof(rsp));
676 rsp.dpp_enabled = pd->dpp_enabled;
677 db_page_addr = ocrdma_get_db_addr(dev, pd->id);
678 db_page_size = dev->nic_info.db_page_size;
680 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
684 if (pd->dpp_enabled) {
685 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
686 (pd->id * PAGE_SIZE);
687 status = ocrdma_add_mmap(uctx, dpp_page_addr,
691 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
692 rsp.dpp_page_addr_lo = dpp_page_addr;
695 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
704 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
706 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
710 struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
711 struct ib_ucontext *context,
712 struct ib_udata *udata)
714 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
715 struct ocrdma_pd *pd;
716 struct ocrdma_ucontext *uctx = NULL;
718 u8 is_uctx_pd = false;
720 if (udata && context) {
721 uctx = get_ocrdma_ucontext(context);
722 pd = ocrdma_get_ucontext_pd(uctx);
729 pd = _ocrdma_alloc_pd(dev, uctx, udata);
731 status = PTR_ERR(pd);
736 if (udata && context) {
737 status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
745 ocrdma_release_ucontext_pd(uctx);
747 status = _ocrdma_dealloc_pd(dev, pd);
750 return ERR_PTR(status);
753 int ocrdma_dealloc_pd(struct ib_pd *ibpd)
755 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
756 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
757 struct ocrdma_ucontext *uctx = NULL;
763 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
764 (pd->id * PAGE_SIZE);
766 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
767 usr_db = ocrdma_get_db_addr(dev, pd->id);
768 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
770 if (is_ucontext_pd(uctx, pd)) {
771 ocrdma_release_ucontext_pd(uctx);
775 status = _ocrdma_dealloc_pd(dev, pd);
779 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
780 u32 pdid, int acc, u32 num_pbls, u32 addr_check)
785 mr->hwmr.local_rd = 1;
786 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
787 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
788 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
789 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
790 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
791 mr->hwmr.num_pbls = num_pbls;
793 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
797 mr->ibmr.lkey = mr->hwmr.lkey;
798 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
799 mr->ibmr.rkey = mr->hwmr.lkey;
803 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
806 struct ocrdma_mr *mr;
807 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
808 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
810 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
811 pr_err("%s err, invalid access rights\n", __func__);
812 return ERR_PTR(-EINVAL);
815 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
817 return ERR_PTR(-ENOMEM);
819 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
820 OCRDMA_ADDR_CHECK_DISABLE);
823 return ERR_PTR(status);
829 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
830 struct ocrdma_hw_mr *mr)
832 struct pci_dev *pdev = dev->nic_info.pdev;
836 for (i = 0; i < mr->num_pbls; i++) {
837 if (!mr->pbl_table[i].va)
839 dma_free_coherent(&pdev->dev, mr->pbl_size,
841 mr->pbl_table[i].pa);
843 kfree(mr->pbl_table);
844 mr->pbl_table = NULL;
848 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
857 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
858 if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
862 num_pbls = DIV_ROUND_UP(num_pbes, pbl_size / sizeof(u64));
865 } while (num_pbls >= dev->attr.max_num_mr_pbl);
867 mr->hwmr.num_pbes = num_pbes;
868 mr->hwmr.num_pbls = num_pbls;
869 mr->hwmr.pbl_size = pbl_size;
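/*
 * Worked example for the sizing loop above, assuming
 * OCRDMA_MIN_HPAGE_SIZE is 4KB and num_pbes = 3000: a 4KB PBL holds
 * 4096 / sizeof(u64) = 512 PBEs, so num_pbls = DIV_ROUND_UP(3000, 512)
 * = 6. If that still exceeded dev->attr.max_num_mr_pbl, pbl_size would
 * double (8KB -> 1024 PBEs per PBL -> 3 PBLs) until the count fits.
 */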
873 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
877 u32 dma_len = mr->pbl_size;
878 struct pci_dev *pdev = dev->nic_info.pdev;
882 mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
883 mr->num_pbls, GFP_KERNEL);
888 for (i = 0; i < mr->num_pbls; i++) {
889 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
891 ocrdma_free_mr_pbl_tbl(dev, mr);
895 memset(va, 0, dma_len);
896 mr->pbl_table[i].va = va;
897 mr->pbl_table[i].pa = pa;
902 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
905 struct ocrdma_pbe *pbe;
906 struct scatterlist *sg;
907 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
908 struct ib_umem *umem = mr->umem;
909 int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
911 if (!mr->hwmr.num_pbes)
914 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
917 shift = ilog2(umem->page_size);
919 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
920 pages = sg_dma_len(sg) >> shift;
921 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
922 /* store the page address in pbe */
924 cpu_to_le32(sg_dma_address
926 (umem->page_size * pg_cnt));
928 cpu_to_le32(upper_32_bits
931 umem->page_size * pg_cnt)));
936 /* if done building pbes, issue the mbx cmd. */
937 if (total_num_pbes == num_pbes)
940 /* if the given pbl is full storing the pbes,
944 (mr->hwmr.pbl_size / sizeof(u64))) {
946 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
954 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
955 u64 usr_addr, int acc, struct ib_udata *udata)
957 int status = -ENOMEM;
958 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
959 struct ocrdma_mr *mr;
960 struct ocrdma_pd *pd;
963 pd = get_ocrdma_pd(ibpd);
965 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
966 return ERR_PTR(-EINVAL);
968 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
970 return ERR_PTR(status);
971 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
972 if (IS_ERR(mr->umem)) {
976 num_pbes = ib_umem_page_count(mr->umem);
977 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
981 mr->hwmr.pbe_size = mr->umem->page_size;
982 mr->hwmr.fbo = ib_umem_offset(mr->umem);
983 mr->hwmr.va = usr_addr;
985 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
986 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
987 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
988 mr->hwmr.local_rd = 1;
989 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
990 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
993 build_user_pbes(dev, mr, num_pbes);
994 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
997 mr->ibmr.lkey = mr->hwmr.lkey;
998 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
999 mr->ibmr.rkey = mr->hwmr.lkey;
1004 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
1007 return ERR_PTR(status);
1010 int ocrdma_dereg_mr(struct ib_mr *ib_mr)
1012 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
1013 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
1015 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
1018 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
1020 /* it could be user registered memory. */
1022 ib_umem_release(mr->umem);
1025 /* Don't stop cleanup, in case FW is unresponsive */
1026 if (dev->mqe_ctx.fw_error_state) {
1027 pr_err("%s(%d) fw not responding.\n",
1033 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1034 struct ib_udata *udata,
1035 struct ib_ucontext *ib_ctx)
1038 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
1039 struct ocrdma_create_cq_uresp uresp;
1041 memset(&uresp, 0, sizeof(uresp));
1042 uresp.cq_id = cq->id;
1043 uresp.page_size = PAGE_ALIGN(cq->len);
1044 uresp.num_pages = 1;
1045 uresp.max_hw_cqe = cq->max_hw_cqe;
1046 uresp.page_addr[0] = virt_to_phys(cq->va);
1047 uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
1048 uresp.db_page_size = dev->nic_info.db_page_size;
1049 uresp.phase_change = cq->phase_change ? 1 : 0;
1050 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1052 pr_err("%s(%d) copy error cqid=0x%x.\n",
1053 __func__, dev->id, cq->id);
1056 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
1059 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
1061 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
1064 cq->ucontext = uctx;
1069 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1070 const struct ib_cq_init_attr *attr,
1071 struct ib_ucontext *ib_ctx,
1072 struct ib_udata *udata)
1074 int entries = attr->cqe;
1075 struct ocrdma_cq *cq;
1076 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
1077 struct ocrdma_ucontext *uctx = NULL;
1080 struct ocrdma_create_cq_ureq ureq;
1083 return ERR_PTR(-EINVAL);
1086 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1087 return ERR_PTR(-EFAULT);
1090 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1092 return ERR_PTR(-ENOMEM);
1094 spin_lock_init(&cq->cq_lock);
1095 spin_lock_init(&cq->comp_handler_lock);
1096 INIT_LIST_HEAD(&cq->sq_head);
1097 INIT_LIST_HEAD(&cq->rq_head);
1100 uctx = get_ocrdma_ucontext(ib_ctx);
1101 pd_id = uctx->cntxt_pd->id;
1104 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1107 return ERR_PTR(status);
1110 status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
1114 cq->phase = OCRDMA_CQE_VALID;
1115 dev->cq_tbl[cq->id] = cq;
1119 ocrdma_mbx_destroy_cq(dev, cq);
1121 return ERR_PTR(status);
1124 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1125 struct ib_udata *udata)
1128 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1130 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1134 ibcq->cqe = new_cnt;
1138 static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1141 int valid_count = 0;
1142 unsigned long flags;
1144 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1145 struct ocrdma_cqe *cqe = NULL;
1148 cqe_cnt = cq->cqe_cnt;
1150 /* The last irq might have scheduled a polling thread;
1151 * sync up with it before hard flushing.
1153 spin_lock_irqsave(&cq->cq_lock, flags);
1155 if (is_cqe_valid(cq, cqe))
1160 ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1161 spin_unlock_irqrestore(&cq->cq_lock, flags);
1164 int ocrdma_destroy_cq(struct ib_cq *ibcq)
1166 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1167 struct ocrdma_eq *eq = NULL;
1168 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1172 dev->cq_tbl[cq->id] = NULL;
1173 indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1174 BUG_ON(indx == -EINVAL);
1176 eq = &dev->eq_tbl[indx];
1177 irq = ocrdma_get_irq(dev, eq);
1178 synchronize_irq(irq);
1179 ocrdma_flush_cq(cq);
1181 (void)ocrdma_mbx_destroy_cq(dev, cq);
1183 pdid = cq->ucontext->cntxt_pd->id;
1184 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
1185 PAGE_ALIGN(cq->len));
1186 ocrdma_del_mmap(cq->ucontext,
1187 ocrdma_get_db_addr(dev, pdid),
1188 dev->nic_info.db_page_size);
1195 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1197 int status = -EINVAL;
1199 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1200 dev->qp_tbl[qp->id] = qp;
1206 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1208 dev->qp_tbl[qp->id] = NULL;
1211 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1212 struct ib_qp_init_attr *attrs)
1214 if ((attrs->qp_type != IB_QPT_GSI) &&
1215 (attrs->qp_type != IB_QPT_RC) &&
1216 (attrs->qp_type != IB_QPT_UC) &&
1217 (attrs->qp_type != IB_QPT_UD)) {
1218 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1219 __func__, dev->id, attrs->qp_type);
1222 /* Skip the check for QP1 to support CM size of 128 */
1223 if ((attrs->qp_type != IB_QPT_GSI) &&
1224 (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1225 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1226 __func__, dev->id, attrs->cap.max_send_wr);
1227 pr_err("%s(%d) supported send_wr=0x%x\n",
1228 __func__, dev->id, dev->attr.max_wqe);
1231 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1232 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1233 __func__, dev->id, attrs->cap.max_recv_wr);
1234 pr_err("%s(%d) supported recv_wr=0x%x\n",
1235 __func__, dev->id, dev->attr.max_rqe);
1238 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1239 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1240 __func__, dev->id, attrs->cap.max_inline_data);
1241 pr_err("%s(%d) supported inline data size=0x%x\n",
1242 __func__, dev->id, dev->attr.max_inline_data);
1245 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1246 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1247 __func__, dev->id, attrs->cap.max_send_sge);
1248 pr_err("%s(%d) supported send_sge=0x%x\n",
1249 __func__, dev->id, dev->attr.max_send_sge);
1252 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1253 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1254 __func__, dev->id, attrs->cap.max_recv_sge);
1255 pr_err("%s(%d) supported recv_sge=0x%x\n",
1256 __func__, dev->id, dev->attr.max_recv_sge);
1259 /* unprivileged user space cannot create special QP */
1260 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1262 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1263 __func__, dev->id, attrs->qp_type);
1266 /* allow creating only one GSI QP */
1267 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1268 pr_err("%s(%d) GSI special QPs already created.\n",
1272 /* verify consumer QPs are not trying to use GSI QP's CQ */
1273 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1274 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1275 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1276 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1284 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1285 struct ib_udata *udata, int dpp_offset,
1286 int dpp_credit_lmt, int srq)
1290 struct ocrdma_create_qp_uresp uresp;
1291 struct ocrdma_pd *pd = qp->pd;
1292 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1294 memset(&uresp, 0, sizeof(uresp));
1295 usr_db = dev->nic_info.unmapped_db +
1296 (pd->id * dev->nic_info.db_page_size);
1297 uresp.qp_id = qp->id;
1298 uresp.sq_dbid = qp->sq.dbid;
1299 uresp.num_sq_pages = 1;
1300 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1301 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1302 uresp.num_wqe_allocated = qp->sq.max_cnt;
1304 uresp.rq_dbid = qp->rq.dbid;
1305 uresp.num_rq_pages = 1;
1306 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1307 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1308 uresp.num_rqe_allocated = qp->rq.max_cnt;
1310 uresp.db_page_addr = usr_db;
1311 uresp.db_page_size = dev->nic_info.db_page_size;
1312 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1313 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1314 uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1316 if (qp->dpp_enabled) {
1317 uresp.dpp_credit = dpp_credit_lmt;
1318 uresp.dpp_offset = dpp_offset;
1320 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1322 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1325 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1326 uresp.sq_page_size);
1331 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1332 uresp.rq_page_size);
1338 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
1343 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1344 struct ocrdma_pd *pd)
1346 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1347 qp->sq_db = dev->nic_info.db +
1348 (pd->id * dev->nic_info.db_page_size) +
1349 OCRDMA_DB_GEN2_SQ_OFFSET;
1350 qp->rq_db = dev->nic_info.db +
1351 (pd->id * dev->nic_info.db_page_size) +
1352 OCRDMA_DB_GEN2_RQ_OFFSET;
1354 qp->sq_db = dev->nic_info.db +
1355 (pd->id * dev->nic_info.db_page_size) +
1356 OCRDMA_DB_SQ_OFFSET;
1357 qp->rq_db = dev->nic_info.db +
1358 (pd->id * dev->nic_info.db_page_size) +
1359 OCRDMA_DB_RQ_OFFSET;
1363 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1366 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
1368 if (qp->wqe_wr_id_tbl == NULL)
1371 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
1372 if (qp->rqe_wr_id_tbl == NULL)
1378 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1379 struct ocrdma_pd *pd,
1380 struct ib_qp_init_attr *attrs)
1383 spin_lock_init(&qp->q_lock);
1384 INIT_LIST_HEAD(&qp->sq_entry);
1385 INIT_LIST_HEAD(&qp->rq_entry);
1387 qp->qp_type = attrs->qp_type;
1388 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1389 qp->max_inline_data = attrs->cap.max_inline_data;
1390 qp->sq.max_sges = attrs->cap.max_send_sge;
1391 qp->rq.max_sges = attrs->cap.max_recv_sge;
1392 qp->state = OCRDMA_QPS_RST;
1393 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1396 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1397 struct ib_qp_init_attr *attrs)
1399 if (attrs->qp_type == IB_QPT_GSI) {
1400 dev->gsi_qp_created = 1;
1401 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1402 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1406 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1407 struct ib_qp_init_attr *attrs,
1408 struct ib_udata *udata)
1411 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1412 struct ocrdma_qp *qp;
1413 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1414 struct ocrdma_create_qp_ureq ureq;
1415 u16 dpp_credit_lmt, dpp_offset;
1417 status = ocrdma_check_qp_params(ibpd, dev, attrs);
1421 memset(&ureq, 0, sizeof(ureq));
1423 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1424 return ERR_PTR(-EFAULT);
1426 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1431 ocrdma_set_qp_init_params(qp, pd, attrs);
1433 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1434 OCRDMA_QP_FAST_REG);
1436 mutex_lock(&dev->dev_lock);
1437 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1439 &dpp_offset, &dpp_credit_lmt);
1443 /* user-space QPs' wr_id tables are managed in the library */
1444 if (udata == NULL) {
1445 status = ocrdma_alloc_wr_id_tbl(qp);
1450 status = ocrdma_add_qpn_map(dev, qp);
1453 ocrdma_set_qp_db(dev, qp, pd);
1455 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1457 (attrs->srq != NULL));
1461 ocrdma_store_gsi_qp_cq(dev, attrs);
1462 qp->ibqp.qp_num = qp->id;
1463 mutex_unlock(&dev->dev_lock);
1467 ocrdma_del_qpn_map(dev, qp);
1469 ocrdma_mbx_destroy_qp(dev, qp);
1471 mutex_unlock(&dev->dev_lock);
1472 kfree(qp->wqe_wr_id_tbl);
1473 kfree(qp->rqe_wr_id_tbl);
1475 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1477 return ERR_PTR(status);
1480 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1484 struct ocrdma_qp *qp;
1485 struct ocrdma_dev *dev;
1486 enum ib_qp_state old_qps;
1488 qp = get_ocrdma_qp(ibqp);
1489 dev = get_ocrdma_dev(ibqp->device);
1490 if (attr_mask & IB_QP_STATE)
1491 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1492 /* if the new and previous states are the same, the hw doesn't need to
1497 return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1500 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1501 int attr_mask, struct ib_udata *udata)
1503 unsigned long flags;
1504 int status = -EINVAL;
1505 struct ocrdma_qp *qp;
1506 struct ocrdma_dev *dev;
1507 enum ib_qp_state old_qps, new_qps;
1509 qp = get_ocrdma_qp(ibqp);
1510 dev = get_ocrdma_dev(ibqp->device);
1512 /* synchronize with multiple contexts trying to change or retrieve the qp state */
1513 mutex_lock(&dev->dev_lock);
1514 /* synchronize with wqe/rqe posting and cqe processing contexts */
1515 spin_lock_irqsave(&qp->q_lock, flags);
1516 old_qps = get_ibqp_state(qp->state);
1517 if (attr_mask & IB_QP_STATE)
1518 new_qps = attr->qp_state;
1521 spin_unlock_irqrestore(&qp->q_lock, flags);
1523 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1524 IB_LINK_LAYER_ETHERNET)) {
1525 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1526 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1527 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1532 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1536 mutex_unlock(&dev->dev_lock);
1540 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1558 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1560 int ib_qp_acc_flags = 0;
1562 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1563 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1564 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1565 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1566 return ib_qp_acc_flags;
1569 int ocrdma_query_qp(struct ib_qp *ibqp,
1570 struct ib_qp_attr *qp_attr,
1571 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1575 struct ocrdma_qp_params params;
1576 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1577 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1579 memset(&params, 0, sizeof(params));
1580 mutex_lock(&dev->dev_lock);
1581 status = ocrdma_mbx_query_qp(dev, qp, &params);
1582 mutex_unlock(&dev->dev_lock);
1585 if (qp->qp_type == IB_QPT_UD)
1586 qp_attr->qkey = params.qkey;
1588 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1589 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1590 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1591 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1592 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1593 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1594 qp_attr->dest_qp_num =
1595 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1597 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1598 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1599 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1600 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1601 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1602 qp_attr->cap.max_inline_data = qp->max_inline_data;
1603 qp_init_attr->cap = qp_attr->cap;
1604 memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1605 sizeof(params.dgid));
1606 qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1607 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1608 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1609 qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1610 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1611 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1612 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1613 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1614 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1616 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1617 qp_attr->ah_attr.port_num = 1;
1618 qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1619 OCRDMA_QP_PARAMS_SL_MASK) >>
1620 OCRDMA_QP_PARAMS_SL_SHIFT;
1621 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1622 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1623 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1624 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1625 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1626 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1627 qp_attr->retry_cnt =
1628 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1629 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1630 qp_attr->min_rnr_timer = 0;
1631 qp_attr->pkey_index = 0;
1632 qp_attr->port_num = 1;
1633 qp_attr->ah_attr.src_path_bits = 0;
1634 qp_attr->ah_attr.static_rate = 0;
1635 qp_attr->alt_pkey_index = 0;
1636 qp_attr->alt_port_num = 0;
1637 qp_attr->alt_timeout = 0;
1638 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1639 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1640 OCRDMA_QP_PARAMS_STATE_SHIFT;
1641 qp_attr->qp_state = get_ibqp_state(qp_state);
1642 qp_attr->cur_qp_state = qp_attr->qp_state;
1643 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1644 qp_attr->max_dest_rd_atomic =
1645 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1646 qp_attr->max_rd_atomic =
1647 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1648 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1649 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1650 /* Sync driver QP state with FW */
1651 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1656 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1658 unsigned int i = idx / 32;
1659 u32 mask = (1U << (idx % 32));
1661 srq->idx_bit_fields[i] ^= mask;
1664 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1666 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
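/*
 * This is the classic circular-ring free count with one slot reserved
 * to distinguish full from empty; the mask-based inc helpers below
 * imply max_cnt is a power of two with max_wqe_idx = max_cnt - 1.
 * Example: max_cnt = 16, head = 5, tail = 2 gives
 * ((15 - 5) + 2) % 16 = 12 free entries.
 */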
1669 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1671 return (qp->sq.tail == qp->sq.head);
1674 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1676 return (qp->rq.tail == qp->rq.head);
1679 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1681 return q->va + (q->head * q->entry_size);
1684 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1687 return q->va + (idx * q->entry_size);
1690 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1692 q->head = (q->head + 1) & q->max_wqe_idx;
1695 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1697 q->tail = (q->tail + 1) & q->max_wqe_idx;
1700 /* discard the cqe for a given QP */
1701 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1703 unsigned long cq_flags;
1704 unsigned long flags;
1705 int discard_cnt = 0;
1706 u32 cur_getp, stop_getp;
1707 struct ocrdma_cqe *cqe;
1708 u32 qpn = 0, wqe_idx = 0;
1710 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1712 /* traverse through the CQEs in the hw CQ,
1713 * find the matching CQE for a given qp,
1714 * mark the matching one discarded by clearing qpn.
1715 * The doorbell is rung in poll_cq() since
1716 * we don't complete CQEs out of order.
1719 cur_getp = cq->getp;
1720 /* find up to where we reap the cq. */
1721 stop_getp = cur_getp;
1723 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1726 cqe = cq->va + cur_getp;
1727 /* if (a) done reaping whole hw cq, or
1728 * (b) qp_xq becomes empty.
1731 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1732 /* if a previously discarded cqe is found, skip it too. */
1733 /* check for matching qp */
1734 if (qpn == 0 || qpn != qp->id)
1737 if (is_cqe_for_sq(cqe)) {
1738 ocrdma_hwq_inc_tail(&qp->sq);
1741 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1742 OCRDMA_CQE_BUFTAG_SHIFT) &
1743 qp->srq->rq.max_wqe_idx;
1744 BUG_ON(wqe_idx < 1);
1745 spin_lock_irqsave(&qp->srq->q_lock, flags);
1746 ocrdma_hwq_inc_tail(&qp->srq->rq);
1747 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1748 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1751 ocrdma_hwq_inc_tail(&qp->rq);
1754 /* mark cqe discarded so that it is not picked up later
1760 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1761 } while (cur_getp != stop_getp);
1762 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1765 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1768 unsigned long flags;
1769 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1770 /* sync with any active CQ poll */
1772 spin_lock_irqsave(&dev->flush_q_lock, flags);
1773 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1775 list_del(&qp->sq_entry);
1777 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1779 list_del(&qp->rq_entry);
1781 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1784 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1786 struct ocrdma_pd *pd;
1787 struct ocrdma_qp *qp;
1788 struct ocrdma_dev *dev;
1789 struct ib_qp_attr attrs;
1791 unsigned long flags;
1793 qp = get_ocrdma_qp(ibqp);
1794 dev = get_ocrdma_dev(ibqp->device);
1798 /* change the QP state to ERROR */
1799 if (qp->state != OCRDMA_QPS_RST) {
1800 attrs.qp_state = IB_QPS_ERR;
1801 attr_mask = IB_QP_STATE;
1802 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1804 /* ensure that CQEs for a newly created QP (whose id may be the
1805 * same as one that is just being destroyed) don't get
1806 * discarded until the old CQEs are discarded.
1808 mutex_lock(&dev->dev_lock);
1809 (void) ocrdma_mbx_destroy_qp(dev, qp);
1812 * acquire CQ lock while destroy is in progress, in order to
1813 * protect against processing in-flight CQEs for this QP.
1815 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1816 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1817 spin_lock(&qp->rq_cq->cq_lock);
1819 ocrdma_del_qpn_map(dev, qp);
1821 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1822 spin_unlock(&qp->rq_cq->cq_lock);
1823 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1826 ocrdma_discard_cqes(qp, qp->sq_cq);
1827 ocrdma_discard_cqes(qp, qp->rq_cq);
1829 mutex_unlock(&dev->dev_lock);
1832 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1833 PAGE_ALIGN(qp->sq.len));
1835 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1836 PAGE_ALIGN(qp->rq.len));
1839 ocrdma_del_flush_qp(qp);
1841 kfree(qp->wqe_wr_id_tbl);
1842 kfree(qp->rqe_wr_id_tbl);
1847 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1848 struct ib_udata *udata)
1851 struct ocrdma_create_srq_uresp uresp;
1853 memset(&uresp, 0, sizeof(uresp));
1854 uresp.rq_dbid = srq->rq.dbid;
1855 uresp.num_rq_pages = 1;
1856 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1857 uresp.rq_page_size = srq->rq.len;
1858 uresp.db_page_addr = dev->nic_info.unmapped_db +
1859 (srq->pd->id * dev->nic_info.db_page_size);
1860 uresp.db_page_size = dev->nic_info.db_page_size;
1861 uresp.num_rqe_allocated = srq->rq.max_cnt;
1862 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1863 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1864 uresp.db_shift = 24;
1866 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1867 uresp.db_shift = 16;
1870 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1873 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1874 uresp.rq_page_size);
1880 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1881 struct ib_srq_init_attr *init_attr,
1882 struct ib_udata *udata)
1884 int status = -ENOMEM;
1885 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1886 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1887 struct ocrdma_srq *srq;
1889 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1890 return ERR_PTR(-EINVAL);
1891 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1892 return ERR_PTR(-EINVAL);
1894 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1896 return ERR_PTR(status);
1898 spin_lock_init(&srq->q_lock);
1900 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1901 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1905 if (udata == NULL) {
1906 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1908 if (srq->rqe_wr_id_tbl == NULL)
1911 srq->bit_fields_len = DIV_ROUND_UP(srq->rq.max_cnt, 32);
1913 srq->idx_bit_fields =
1914 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1915 if (srq->idx_bit_fields == NULL)
1917 memset(srq->idx_bit_fields, 0xff,
1918 srq->bit_fields_len * sizeof(u32));
1921 if (init_attr->attr.srq_limit) {
1922 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1928 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1936 ocrdma_mbx_destroy_srq(dev, srq);
1938 kfree(srq->rqe_wr_id_tbl);
1939 kfree(srq->idx_bit_fields);
1941 return ERR_PTR(status);
1944 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1945 struct ib_srq_attr *srq_attr,
1946 enum ib_srq_attr_mask srq_attr_mask,
1947 struct ib_udata *udata)
1950 struct ocrdma_srq *srq;
1952 srq = get_ocrdma_srq(ibsrq);
1953 if (srq_attr_mask & IB_SRQ_MAX_WR)
1956 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1960 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1963 struct ocrdma_srq *srq;
1965 srq = get_ocrdma_srq(ibsrq);
1966 status = ocrdma_mbx_query_srq(srq, srq_attr);
1970 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1973 struct ocrdma_srq *srq;
1974 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1976 srq = get_ocrdma_srq(ibsrq);
1978 status = ocrdma_mbx_destroy_srq(dev, srq);
1981 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1982 PAGE_ALIGN(srq->rq.len));
1984 kfree(srq->idx_bit_fields);
1985 kfree(srq->rqe_wr_id_tbl);
1990 /* unprivileged verbs and their support functions. */
1991 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1992 struct ocrdma_hdr_wqe *hdr,
1993 struct ib_send_wr *wr)
1995 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1996 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1997 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1999 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
2000 if (qp->qp_type == IB_QPT_GSI)
2001 ud_hdr->qkey = qp->qkey;
2003 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
2004 ud_hdr->rsvd_ahid = ah->id;
2005 ud_hdr->hdr_type = ah->hdr_type;
2006 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
2007 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
2010 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
2011 struct ocrdma_sge *sge, int num_sge,
2012 struct ib_sge *sg_list)
2016 for (i = 0; i < num_sge; i++) {
2017 sge[i].lrkey = sg_list[i].lkey;
2018 sge[i].addr_lo = sg_list[i].addr;
2019 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
2020 sge[i].len = sg_list[i].length;
2021 hdr->total_len += sg_list[i].length;
2024 memset(sge, 0, sizeof(*sge));
2027 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
2029 uint32_t total_len = 0, i;
2031 for (i = 0; i < num_sge; i++)
2032 total_len += sg_list[i].length;
2037 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
2038 struct ocrdma_hdr_wqe *hdr,
2039 struct ocrdma_sge *sge,
2040 struct ib_send_wr *wr, u32 wqe_size)
2045 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
2046 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
2047 if (unlikely(hdr->total_len > qp->max_inline_data)) {
2048 pr_err("%s() supported_len=0x%x,\n"
2049 " unsupported len req=0x%x\n", __func__,
2050 qp->max_inline_data, hdr->total_len);
2053 dpp_addr = (char *)sge;
2054 for (i = 0; i < wr->num_sge; i++) {
2056 (void *)(unsigned long)wr->sg_list[i].addr,
2057 wr->sg_list[i].length);
2058 dpp_addr += wr->sg_list[i].length;
2061 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
2062 if (hdr->total_len == 0)
2063 wqe_size += sizeof(struct ocrdma_sge);
2064 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2066 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2068 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2070 wqe_size += sizeof(struct ocrdma_sge);
2071 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2073 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
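/*
 * The control word (cw) built above packs the WQE size in units of
 * OCRDMA_WQE_STRIDE alongside the type, opcode and flag fields, each
 * at its own shift. The post-send path recovers the byte length as
 * ((cw >> OCRDMA_WQE_SIZE_SHIFT) & OCRDMA_WQE_SIZE_MASK) *
 * OCRDMA_WQE_STRIDE when converting the WQE to little-endian for the
 * adapter.
 */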
2077 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2078 struct ib_send_wr *wr)
2081 struct ocrdma_sge *sge;
2082 u32 wqe_size = sizeof(*hdr);
2084 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2085 ocrdma_build_ud_hdr(qp, hdr, wr);
2086 sge = (struct ocrdma_sge *)(hdr + 2);
2087 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2089 sge = (struct ocrdma_sge *)(hdr + 1);
2092 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2096 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2097 struct ib_send_wr *wr)
2100 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2101 struct ocrdma_sge *sge = ext_rw + 1;
2102 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2104 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2107 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2108 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2109 ext_rw->lrkey = rdma_wr(wr)->rkey;
2110 ext_rw->len = hdr->total_len;
2114 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2115 struct ib_send_wr *wr)
2117 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2118 struct ocrdma_sge *sge = ext_rw + 1;
2119 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2120 sizeof(struct ocrdma_hdr_wqe);
2122 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2123 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2124 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2125 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2127 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2128 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2129 ext_rw->lrkey = rdma_wr(wr)->rkey;
2130 ext_rw->len = hdr->total_len;
2133 static int get_encoded_page_size(int pg_sz)
2135 /* Max size is 256M: 4096 << 16 */
2138 if (pg_sz == (4096 << i))
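/*
 * The loop above returns the exponent i for which pg_sz == 4096 << i,
 * i.e. page sizes are encoded relative to 4KB: 4KB -> 0, 8KB -> 1,
 * 2MB -> 9, up to the 256MB maximum (4096 << 16) -> 16.
 */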
2143 static int ocrdma_build_reg(struct ocrdma_qp *qp,
2144 struct ocrdma_hdr_wqe *hdr,
2145 struct ib_reg_wr *wr)
2148 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2149 struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2150 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2151 struct ocrdma_pbe *pbe;
2152 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2153 int num_pbes = 0, i;
2155 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2157 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2158 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2160 if (wr->access & IB_ACCESS_LOCAL_WRITE)
2161 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2162 if (wr->access & IB_ACCESS_REMOTE_WRITE)
2163 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2164 if (wr->access & IB_ACCESS_REMOTE_READ)
2165 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2166 hdr->lkey = wr->key;
2167 hdr->total_len = mr->ibmr.length;
2169 fbo = mr->ibmr.iova - mr->pages[0];
2171 fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2172 fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2173 fast_reg->fbo_hi = upper_32_bits(fbo);
2174 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2175 fast_reg->num_sges = mr->npages;
2176 fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2179 for (i = 0; i < mr->npages; i++) {
2180 u64 buf_addr = mr->pages[i];
2182 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2183 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2187 /* if the pbl is full storing the pbes,
2190 if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
2192 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2199 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2201 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2203 iowrite32(val, qp->sq_db);
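/*
 * Doorbell layout sketch: the low bits carry the queue id (dbid) and
 * the count of newly posted entries sits above OCRDMA_DB_SQ_SHIFT.
 * ocrdma_post_send() rings the doorbell once per WQE, so the count
 * written here is always 1; the RQ and SRQ doorbells below use the
 * same id-plus-count layout at their own offsets.
 */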
2206 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2207 struct ib_send_wr **bad_wr)
2210 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2211 struct ocrdma_hdr_wqe *hdr;
2212 unsigned long flags;
2214 spin_lock_irqsave(&qp->q_lock, flags);
2215 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2216 spin_unlock_irqrestore(&qp->q_lock, flags);
2222 if (qp->qp_type == IB_QPT_UD &&
2223 (wr->opcode != IB_WR_SEND &&
2224 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2229 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2230 wr->num_sge > qp->sq.max_sges) {
2235 hdr = ocrdma_hwq_head(&qp->sq);
2237 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2238 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2239 if (wr->send_flags & IB_SEND_FENCE)
2241 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2242 if (wr->send_flags & IB_SEND_SOLICITED)
2244 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2246 switch (wr->opcode) {
2247 case IB_WR_SEND_WITH_IMM:
2248 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2249 hdr->immdt = ntohl(wr->ex.imm_data);
2251 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2252 ocrdma_build_send(qp, hdr, wr);
2254 case IB_WR_SEND_WITH_INV:
2255 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2256 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2257 hdr->lkey = wr->ex.invalidate_rkey;
2258 status = ocrdma_build_send(qp, hdr, wr);
2260 case IB_WR_RDMA_WRITE_WITH_IMM:
2261 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2262 hdr->immdt = ntohl(wr->ex.imm_data);
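/* fall through - the IMM variant also builds a write WQE */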
2263 case IB_WR_RDMA_WRITE:
2264 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2265 status = ocrdma_build_write(qp, hdr, wr);
2267 case IB_WR_RDMA_READ:
2268 ocrdma_build_read(qp, hdr, wr);
2270 case IB_WR_LOCAL_INV:
2272 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2273 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2274 sizeof(struct ocrdma_sge)) /
2275 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2276 hdr->lkey = wr->ex.invalidate_rkey;
2279 status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2289 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2290 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2292 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2293 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2294 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2295 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2296 /* make sure wqe is written before adapter can access it */
2298 /* inform hw to start processing it */
2299 ocrdma_ring_sq_db(qp);
2301 /* update pointer, counter for next wr */
2302 ocrdma_hwq_inc_head(&qp->sq);
2305 spin_unlock_irqrestore(&qp->q_lock, flags);
2309 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2311 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2313 iowrite32(val, qp->rq_db);
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
			     u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
				OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

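/* ocrdma_post_recv() mirrors the send path: check QP state and RQ
 * space, build each RQE, record the wr_id at the current head, issue a
 * barrier, ring the RQ doorbell and advance the head for the next WR.
 */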
int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

/* cqe for srq's rqe can potentially arrive out of order.
 * index gives the entry in the shadow table where to store
 * the wr_id. tag/index is returned in cqe to reference back
 * to the stored wr_id.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}

	BUG_ON(row == srq->bit_fields_len);
	return indx + 1; /* Use from index 1 */
}

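/* Worked example for ocrdma_srq_get_idx() above: with
 * idx_bit_fields[1] == 0x10, ffs() yields 5, so the free slot is
 * (1 * 32) + (5 - 1) = 36 and the returned tag is 37; tags start at 1,
 * never 0.
 */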
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}

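/* Translate an adapter CQE status code into the matching ib_wc_status;
 * unknown codes are collapsed to IB_WC_GENERAL_ERR.
 */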
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);

	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

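/* Overwrite a CQE's status field in place with WR_FLUSH_ERR. The status
 * shares a little-endian word with the flags and source QPN, hence the
 * le32 read-modify-write; UD QPs keep their status in a different bit
 * range of that same word.
 */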
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
						~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
						~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}

static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* if wqe/rqe pending for which cqe needs to be returned,
	 * trigger inflating it.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

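/* The error-CQE pollers below report three results:
 *   *polled - a work completion was written into ibwc;
 *   *stop   - leave this CQE in the queue and stop polling, since it
 *             must still raise an event on the buddy CQ;
 *   return  - "expand": reuse this CQE to flush further pending
 *             WQEs/RQEs before it is finally consumed.
 */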
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when hw sq is empty, but rq is not empty, so we continue
	 * to keep the cqe in order to get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when cq for rq and sq is same, it is safe to return
		 * flush cqe for RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqe as this cqe is used for
			 * triggering cq event on buddy cq of RQ.
			 * When QP is destroyed, this cqe will be removed
			 * from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;	/* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
				 struct ocrdma_cqe *cqe)
{
	int status;
	u16 hdr_type = 0;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
						OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = 0;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
			  OCRDMA_CQE_UD_XFER_LEN_MASK;

	if (ocrdma_is_udp_encap_supported(dev)) {
		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			    OCRDMA_CQE_UD_L3TYPE_SHIFT) &
			    OCRDMA_CQE_UD_L3TYPE_MASK;
		ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		ibwc->network_hdr_type = hdr_type;
	}

	return status;
}

static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	BUG_ON(wqe_idx < 1);

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}

static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when hw_rq is empty, but wq is not empty, so continue
	 * to keep the cqe to get the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(qp->ibqp.device);
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
					OCRDMA_CQE_UD_STATUS_MASK) >>
					OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}

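/* CQE ownership is tracked with a valid/phase bit instead of a shared
 * consumer index: whenever the get pointer wraps to 0 the expected
 * phase flips, and on queues without phase change each consumed CQE's
 * valid bit is cleared directly (see ocrdma_change_cq_phase() above).
 */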
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe))
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		else
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

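/* Poll in two stages: hardware CQEs from the adapter queue first, then,
 * if the caller still has room, synthesized FLUSH completions for QPs
 * queued on this CQ's flush list, because the adapter reports only a
 * single error CQE when a QP enters the error state.
 */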
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* adapter returns single error cqe when qp moves to
		 * error state. So insert error cqes with wc_status as
		 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ
		 * respectively which uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

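/*
 * ocrdma_alloc_mr() - allocate a fast-registration MR with a PBL table
 * sized for up to max_num_sg pages.
 *
 * Illustrative sketch only (not part of this driver): a consumer would
 * pair this with ib_map_mr_sg() and an IB_WR_REG_MR work request, e.g.
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 *
 * where pd, qp, sgl and nents come from the caller.
 */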
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
			      enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		status = -ENOMEM;
		goto pl_err;
	}

	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr->pages);
pl_err:
	kfree(mr);
	return ERR_PTR(-ENOMEM);
}

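/* Page-collection callback for ib_sg_to_pages(): store one page address
 * per call into mr->pages, failing once the PBL capacity (num_pbes) is
 * exhausted.
 */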
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	if (unlikely(mr->npages == mr->hwmr.num_pbes))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}