/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "qplib_res.h"
#include "qplib_rcfw.h"

#include <rdma/bnxt_re-abi.h>
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}
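/* Reverse of __from_ib_access_flags(): translate qplib access bits back
 * into the IB verbs access-flag representation.
 */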
static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}
121 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
128 netdev = rdev->netdev;
136 int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
143 memset(ib_attr, 0, sizeof(*ib_attr));
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
148 ib_attr->max_mr_size = ~0ull;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
150 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
151 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
153 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
154 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
155 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
156 ib_attr->max_qp = dev_attr->max_qp;
157 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
158 ib_attr->device_cap_flags =
159 IB_DEVICE_CURR_QP_STATE_MOD
160 | IB_DEVICE_RC_RNR_NAK_GEN
161 | IB_DEVICE_SHUTDOWN_PORT
162 | IB_DEVICE_SYS_IMAGE_GUID
163 | IB_DEVICE_LOCAL_DMA_LKEY
164 | IB_DEVICE_RESIZE_MAX_WR
165 | IB_DEVICE_PORT_ACTIVE_EVENT
166 | IB_DEVICE_N_NOTIFY_CQ
167 | IB_DEVICE_MEM_WINDOW
168 | IB_DEVICE_MEM_WINDOW_TYPE_2B
169 | IB_DEVICE_MEM_MGT_EXTENSIONS;
170 ib_attr->max_sge = dev_attr->max_qp_sges;
171 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
172 ib_attr->max_cq = dev_attr->max_cq;
173 ib_attr->max_cqe = dev_attr->max_cq_wqes;
174 ib_attr->max_mr = dev_attr->max_mr;
175 ib_attr->max_pd = dev_attr->max_pd;
176 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
177 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
178 ib_attr->atomic_cap = IB_ATOMIC_HCA;
179 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
203 ib_attr->max_pkeys = 1;
204 ib_attr->local_ca_ack_delay = 0;
208 int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modifying the GUID requires modifying the GID table,
		 * so the GUID is treated as read-only.
		 */
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* The node description is treated as read-only */
226 static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
228 struct ethtool_link_ksettings lksettings;
231 if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
232 memset(&lksettings, 0, sizeof(lksettings));
234 netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
236 espeed = lksettings.base.speed;
238 espeed = SPEED_UNKNOWN;
242 *speed = IB_SPEED_SDR;
243 *width = IB_WIDTH_1X;
246 *speed = IB_SPEED_QDR;
247 *width = IB_WIDTH_1X;
250 *speed = IB_SPEED_DDR;
251 *width = IB_WIDTH_4X;
254 *speed = IB_SPEED_EDR;
255 *width = IB_WIDTH_1X;
258 *speed = IB_SPEED_QDR;
259 *width = IB_WIDTH_4X;
264 *speed = IB_SPEED_SDR;
265 *width = IB_WIDTH_1X;
271 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
272 struct ib_port_attr *port_attr)
274 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
275 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
277 memset(port_attr, 0, sizeof(*port_attr));
279 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
280 port_attr->state = IB_PORT_ACTIVE;
281 port_attr->phys_state = 5;
283 port_attr->state = IB_PORT_DOWN;
284 port_attr->phys_state = 3;
286 port_attr->max_mtu = IB_MTU_4096;
287 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
288 port_attr->gid_tbl_len = dev_attr->max_sgid;
289 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
290 IB_PORT_DEVICE_MGMT_SUP |
291 IB_PORT_VENDOR_CLASS_SUP |
292 IB_PORT_IP_BASED_GIDS;
294 /* Max MSG size set to 2G for now */
295 port_attr->max_msg_sz = 0x80000000;
296 port_attr->bad_pkey_cntr = 0;
297 port_attr->qkey_viol_cntr = 0;
298 port_attr->pkey_tbl_len = dev_attr->max_pkey;
300 port_attr->sm_lid = 0;
302 port_attr->max_vl_num = 4;
303 port_attr->sm_sl = 0;
304 port_attr->subnet_timeout = 0;
305 port_attr->init_type_reply = 0;
	/* Call the underlying netdev's ethtool hooks to query the speed
	 * settings, for which rtnl_lock is acquired _only_ while the device
	 * is registered with the IB stack, to avoid a race with the
	 * NETDEV_UNREG path.
	 */
310 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
311 __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
312 &port_attr->active_width);
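/* Port modify: none of the recognized masks (shutdown, init type,
 * Q_Key violation counter reset) maps to a hardware operation in this
 * driver, so the request is effectively a no-op.
 */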
316 int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
317 int port_modify_mask,
318 struct ib_port_modify *port_modify)
320 switch (port_modify_mask) {
321 case IB_PORT_SHUTDOWN:
323 case IB_PORT_INIT_TYPE:
325 case IB_PORT_RESET_QKEY_CNTR:
333 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
334 struct ib_port_immutable *immutable)
336 struct ib_port_attr port_attr;
338 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
341 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
342 immutable->gid_tbl_len = port_attr.gid_tbl_len;
343 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
344 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
345 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
349 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
350 u16 index, u16 *pkey)
352 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
354 /* Ignore port_num */
356 memset(pkey, 0, sizeof(*pkey));
357 return bnxt_qplib_get_pkey(&rdev->qplib_res,
358 &rdev->qplib_res.pkey_tbl, index, pkey);
361 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
362 int index, union ib_gid *gid)
364 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
367 /* Ignore port_num */
368 memset(gid, 0, sizeof(*gid));
369 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
370 &rdev->qplib_res.sgid_tbl, index,
371 (struct bnxt_qplib_gid *)gid);
375 int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
376 unsigned int index, void **context)
379 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
380 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
381 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
383 /* Delete the entry from the hardware */
388 if (sgid_tbl && sgid_tbl->active) {
389 if (ctx->idx >= sgid_tbl->max)
393 rc = bnxt_qplib_del_sgid
395 &sgid_tbl->tbl[ctx->idx], true);
397 dev_err(rdev_to_dev(rdev),
398 "Failed to remove GID: %#x", rc);
399 ctx_tbl = sgid_tbl->ctx;
400 ctx_tbl[ctx->idx] = NULL;
409 int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
410 unsigned int index, const union ib_gid *gid,
411 const struct ib_gid_attr *attr, void **context)
415 u16 vlan_id = 0xFFFF;
416 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
417 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
418 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
420 if ((attr->ndev) && is_vlan_dev(attr->ndev))
421 vlan_id = vlan_dev_vlan_id(attr->ndev);
423 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
424 rdev->qplib_res.netdev->dev_addr,
425 vlan_id, true, &tbl_idx);
426 if (rc == -EALREADY) {
427 ctx_tbl = sgid_tbl->ctx;
428 ctx_tbl[tbl_idx]->refcnt++;
429 *context = ctx_tbl[tbl_idx];
434 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
438 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
441 ctx_tbl = sgid_tbl->ctx;
444 ctx_tbl[tbl_idx] = ctx;
449 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
452 return IB_LINK_LAYER_ETHERNET;
455 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
457 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
459 struct bnxt_re_fence_data *fence = &pd->fence;
460 struct ib_mr *ib_mr = &fence->mr->ib_mr;
461 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
463 memset(wqe, 0, sizeof(*wqe));
464 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
465 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
466 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
467 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
468 wqe->bind.zero_based = false;
469 wqe->bind.parent_l_key = ib_mr->lkey;
470 wqe->bind.va = (u64)(unsigned long)fence->va;
471 wqe->bind.length = fence->size;
472 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
473 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
475 /* Save the initial rkey in fence structure for now;
476 * wqe->bind.r_key will be set at (re)bind time.
478 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
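/* Post a copy of the pre-built MW bind WQE (see bnxt_re_create_fence_wqe())
 * on this QP's send queue using the current bind_rkey, advance the rkey for
 * the next bind, and ring the SQ doorbell.
 */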
481 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
483 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
485 struct ib_pd *ib_pd = qp->ib_qp.pd;
486 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
487 struct bnxt_re_fence_data *fence = &pd->fence;
488 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
489 struct bnxt_qplib_swqe wqe;
492 memcpy(&wqe, fence_wqe, sizeof(wqe));
493 wqe.bind.r_key = fence->bind_rkey;
494 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
496 dev_dbg(rdev_to_dev(qp->rdev),
497 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
498 wqe.bind.r_key, qp->qplib_qp.id, pd);
499 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
501 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
504 bnxt_qplib_post_send_db(&qp->qplib_qp);
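/* Release the fence resources in the reverse order of creation: deallocate
 * the memory window, deregister and free the HW MR, and unmap the DMA buffer.
 */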
509 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
511 struct bnxt_re_fence_data *fence = &pd->fence;
512 struct bnxt_re_dev *rdev = pd->rdev;
513 struct device *dev = &rdev->en_dev->pdev->dev;
514 struct bnxt_re_mr *mr = fence->mr;
517 bnxt_re_dealloc_mw(fence->mw);
522 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
525 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
529 if (fence->dma_addr) {
530 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
536 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
538 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
539 struct bnxt_re_fence_data *fence = &pd->fence;
540 struct bnxt_re_dev *rdev = pd->rdev;
541 struct device *dev = &rdev->en_dev->pdev->dev;
542 struct bnxt_re_mr *mr = NULL;
543 dma_addr_t dma_addr = 0;
548 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
550 rc = dma_mapping_error(dev, dma_addr);
552 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
557 fence->dma_addr = dma_addr;
560 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
567 mr->qplib_mr.pd = &pd->qplib_pd;
568 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
569 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
570 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
572 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
577 mr->ib_mr.lkey = mr->qplib_mr.lkey;
578 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
579 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
581 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
582 BNXT_RE_FENCE_PBL_SIZE, false);
584 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
587 mr->ib_mr.rkey = mr->qplib_mr.rkey;
589 /* Create a fence MW only for kernel consumers */
590 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
592 dev_err(rdev_to_dev(rdev),
593 "Failed to create fence-MW for PD: %p\n", pd);
599 bnxt_re_create_fence_wqe(pd);
603 bnxt_re_destroy_fence_mr(pd);
607 /* Protection Domains */
608 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
610 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
611 struct bnxt_re_dev *rdev = pd->rdev;
614 bnxt_re_destroy_fence_mr(pd);
616 if (pd->qplib_pd.id) {
617 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
618 &rdev->qplib_res.pd_tbl,
621 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
628 struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
629 struct ib_ucontext *ucontext,
630 struct ib_udata *udata)
632 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
633 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
634 struct bnxt_re_ucontext,
636 struct bnxt_re_pd *pd;
639 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
641 return ERR_PTR(-ENOMEM);
644 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
645 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
651 struct bnxt_re_pd_resp resp;
653 if (!ucntx->dpi.dbr) {
			/* Allocate the DPI in alloc_pd so that ibv_devinfo and
			 * similar applications do not fail when DPIs
			 * are depleted.
			 */
658 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
659 &ucntx->dpi, ucntx)) {
665 resp.pdid = pd->qplib_pd.id;
666 /* Still allow mapping this DBR to the new user PD. */
667 resp.dpi = ucntx->dpi.dpi;
668 resp.dbr = (u64)ucntx->dpi.umdbr;
670 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
672 dev_err(rdev_to_dev(rdev),
673 "Failed to copy user response\n");
679 if (bnxt_re_create_fence_mr(pd))
680 dev_warn(rdev_to_dev(rdev),
681 "Failed to create Fence-MR\n");
684 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
691 /* Address Handles */
692 int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
694 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
695 struct bnxt_re_dev *rdev = ah->rdev;
698 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
700 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
707 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
708 struct rdma_ah_attr *ah_attr,
709 struct ib_udata *udata)
711 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
712 struct bnxt_re_dev *rdev = pd->rdev;
713 struct bnxt_re_ah *ah;
714 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
719 struct ib_gid_attr sgid_attr;
721 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
722 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
723 return ERR_PTR(-EINVAL);
725 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
727 return ERR_PTR(-ENOMEM);
730 ah->qplib_ah.pd = &pd->qplib_pd;
732 /* Supply the configuration for the HW */
733 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
734 sizeof(union ib_gid));
	 * If RoCE V2 is enabled, the stack will have two entries for
	 * each GID entry. Avoid this duplicate entry in HW by dividing
	 * the GID index by 2 for RoCE V2.
740 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
741 ah->qplib_ah.host_sgid_index = grh->sgid_index;
742 ah->qplib_ah.traffic_class = grh->traffic_class;
743 ah->qplib_ah.flow_label = grh->flow_label;
744 ah->qplib_ah.hop_limit = grh->hop_limit;
745 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
746 if (ib_pd->uobject &&
747 !rdma_is_multicast_addr((struct in6_addr *)
749 !rdma_link_local_addr((struct in6_addr *)
753 rc = ib_get_cached_gid(&rdev->ibdev, 1,
754 grh->sgid_index, &sgid,
757 dev_err(rdev_to_dev(rdev),
758 "Failed to query gid at index %d",
762 if (sgid_attr.ndev) {
763 if (is_vlan_dev(sgid_attr.ndev))
764 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
765 dev_put(sgid_attr.ndev);
767 /* Get network header type for this GID */
768 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
770 case RDMA_NETWORK_IPV4:
771 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
773 case RDMA_NETWORK_IPV6:
774 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
777 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
780 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
781 ah_attr->roce.dmac, &vlan_tag,
782 &sgid_attr.ndev->ifindex,
785 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
790 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
791 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
793 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
797 /* Write AVID to shared page. */
798 if (ib_pd->uobject) {
799 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
800 struct bnxt_re_ucontext *uctx;
804 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
805 spin_lock_irqsave(&uctx->sh_lock, flag);
806 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
807 *wrptr = ah->qplib_ah.id;
808 wmb(); /* make sure cache is updated. */
809 spin_unlock_irqrestore(&uctx->sh_lock, flag);
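/* bnxt_re_modify_ah() - AH attributes are programmed into HW only at
 * creation time in this driver; there is nothing to update here.
 */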
819 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
824 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
826 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
828 ah_attr->type = ib_ah->type;
829 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
830 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
831 rdma_ah_set_grh(ah_attr, NULL, 0,
832 ah->qplib_ah.host_sgid_index,
833 0, ah->qplib_ah.traffic_class);
834 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
835 rdma_ah_set_port_num(ah_attr, 1);
836 rdma_ah_set_static_rate(ah_attr, 0);
841 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
843 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
844 struct bnxt_re_dev *rdev = qp->rdev;
847 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
849 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
852 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
853 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
854 &rdev->sqp_ah->qplib_ah);
856 dev_err(rdev_to_dev(rdev),
857 "Failed to destroy HW AH for shadow QP");
861 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
862 &rdev->qp1_sqp->qplib_qp);
864 dev_err(rdev_to_dev(rdev),
865 "Failed to destroy Shadow QP");
868 mutex_lock(&rdev->qp_lock);
869 list_del(&rdev->qp1_sqp->list);
870 atomic_dec(&rdev->qp_count);
871 mutex_unlock(&rdev->qp_lock);
874 kfree(rdev->qp1_sqp);
877 if (!IS_ERR_OR_NULL(qp->rumem))
878 ib_umem_release(qp->rumem);
879 if (!IS_ERR_OR_NULL(qp->sumem))
880 ib_umem_release(qp->sumem);
882 mutex_lock(&rdev->qp_lock);
884 atomic_dec(&rdev->qp_count);
885 mutex_unlock(&rdev->qp_lock);
890 static u8 __from_ib_qp_type(enum ib_qp_type type)
894 return CMDQ_CREATE_QP1_TYPE_GSI;
896 return CMDQ_CREATE_QP_TYPE_RC;
898 return CMDQ_CREATE_QP_TYPE_UD;
904 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
905 struct bnxt_re_qp *qp, struct ib_udata *udata)
907 struct bnxt_re_qp_req ureq;
908 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
909 struct ib_umem *umem;
911 struct ib_ucontext *context = pd->ib_pd.uobject->context;
912 struct bnxt_re_ucontext *cntx = container_of(context,
913 struct bnxt_re_ucontext,
915 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
918 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
919 /* Consider mapping PSN search memory only for RC QPs. */
920 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
921 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
922 bytes = PAGE_ALIGN(bytes);
923 umem = ib_umem_get(context, ureq.qpsva, bytes,
924 IB_ACCESS_LOCAL_WRITE, 1);
926 return PTR_ERR(umem);
929 qplib_qp->sq.sglist = umem->sg_head.sgl;
930 qplib_qp->sq.nmap = umem->nmap;
931 qplib_qp->qp_handle = ureq.qp_handle;
933 if (!qp->qplib_qp.srq) {
934 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
935 bytes = PAGE_ALIGN(bytes);
936 umem = ib_umem_get(context, ureq.qprva, bytes,
937 IB_ACCESS_LOCAL_WRITE, 1);
941 qplib_qp->rq.sglist = umem->sg_head.sgl;
942 qplib_qp->rq.nmap = umem->nmap;
945 qplib_qp->dpi = &cntx->dpi;
948 ib_umem_release(qp->sumem);
950 qplib_qp->sq.sglist = NULL;
951 qplib_qp->sq.nmap = 0;
953 return PTR_ERR(umem);
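/* Create the address handle used by the shadow QP: the DGID is the local
 * SGID at index 0 and the DMAC is the port's own MAC, effectively addressing
 * traffic back to this port.
 */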
956 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
957 (struct bnxt_re_pd *pd,
958 struct bnxt_qplib_res *qp1_res,
959 struct bnxt_qplib_qp *qp1_qp)
961 struct bnxt_re_dev *rdev = pd->rdev;
962 struct bnxt_re_ah *ah;
966 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
970 memset(ah, 0, sizeof(*ah));
972 ah->qplib_ah.pd = &pd->qplib_pd;
974 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	/* Supply the DGID with the same data as the SGID */
979 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
980 sizeof(union ib_gid));
981 ah->qplib_ah.sgid_index = 0;
983 ah->qplib_ah.traffic_class = 0;
984 ah->qplib_ah.flow_label = 0;
985 ah->qplib_ah.hop_limit = 1;
987 /* Have DMAC same as SMAC */
988 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
990 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
992 dev_err(rdev_to_dev(rdev),
993 "Failed to allocate HW AH for Shadow QP");
1004 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1005 (struct bnxt_re_pd *pd,
1006 struct bnxt_qplib_res *qp1_res,
1007 struct bnxt_qplib_qp *qp1_qp)
1009 struct bnxt_re_dev *rdev = pd->rdev;
1010 struct bnxt_re_qp *qp;
1013 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1017 memset(qp, 0, sizeof(*qp));
1020 /* Initialize the shadow QP structure from the QP1 values */
1021 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1023 qp->qplib_qp.pd = &pd->qplib_pd;
1024 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD;
1027 qp->qplib_qp.max_inline_data = 0;
1028 qp->qplib_qp.sig_type = true;
1030 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1031 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1032 qp->qplib_qp.sq.max_sge = 2;
1033 /* Q full delta can be 1 since it is internal QP */
1034 qp->qplib_qp.sq.q_full_delta = 1;
1036 qp->qplib_qp.scq = qp1_qp->scq;
1037 qp->qplib_qp.rcq = qp1_qp->rcq;
1039 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1040 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1041 /* Q full delta can be 1 since it is internal QP */
1042 qp->qplib_qp.rq.q_full_delta = 1;
1044 qp->qplib_qp.mtu = qp1_qp->mtu;
1046 qp->qplib_qp.sq_hdr_buf_size = 0;
1047 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1048 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1050 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1054 rdev->sqp_id = qp->qplib_qp.id;
1056 spin_lock_init(&qp->sq_lock);
1057 INIT_LIST_HEAD(&qp->list);
1058 mutex_lock(&rdev->qp_lock);
1059 list_add_tail(&qp->list, &rdev->qp_list);
1060 atomic_inc(&rdev->qp_count);
1061 mutex_unlock(&rdev->qp_lock);
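/* Verbs create_qp entry point: validate the requested capabilities against
 * the device limits, size the SQ/RQ (reserving extra slots for internal use),
 * and create either the GSI QP1 together with its shadow QP, or a regular
 * QP, in hardware.
 */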
1068 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1069 struct ib_qp_init_attr *qp_init_attr,
1070 struct ib_udata *udata)
1072 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1073 struct bnxt_re_dev *rdev = pd->rdev;
1074 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1075 struct bnxt_re_qp *qp;
1076 struct bnxt_re_cq *cq;
1079 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1080 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1081 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1082 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1083 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1084 return ERR_PTR(-EINVAL);
1086 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1088 return ERR_PTR(-ENOMEM);
1091 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1092 qp->qplib_qp.pd = &pd->qplib_pd;
1093 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1094 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1095 if (qp->qplib_qp.type == IB_QPT_MAX) {
1096 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1101 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1102 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1103 IB_SIGNAL_ALL_WR) ? true : false);
1105 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1106 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1107 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1109 if (qp_init_attr->send_cq) {
1110 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1113 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1117 qp->qplib_qp.scq = &cq->qplib_cq;
1120 if (qp_init_attr->recv_cq) {
1121 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1124 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1128 qp->qplib_qp.rcq = &cq->qplib_cq;
1131 if (qp_init_attr->srq) {
1132 dev_err(rdev_to_dev(rdev), "SRQ not supported");
	/* Allocate 1 more than what's provided so posting max doesn't
	 * mean empty.
	 */
1139 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1140 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1141 dev_attr->max_qp_wqes + 1);
1143 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1144 qp_init_attr->cap.max_recv_wr;
1146 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1147 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1148 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1151 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1153 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1154 /* Allocate 1 more than what's provided */
1155 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1156 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1157 dev_attr->max_qp_wqes + 1);
1158 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1159 qp_init_attr->cap.max_send_wr;
1160 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1161 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1162 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1163 qp->qplib_qp.sq.max_sge++;
1164 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1165 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1167 qp->qplib_qp.rq_hdr_buf_size =
1168 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1170 qp->qplib_qp.sq_hdr_buf_size =
1171 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1172 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1173 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1175 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1178 /* Create a shadow QP to handle the QP1 traffic */
1179 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1181 if (!rdev->qp1_sqp) {
1183 dev_err(rdev_to_dev(rdev),
1184 "Failed to create Shadow QP for QP1");
1187 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1189 if (!rdev->sqp_ah) {
1190 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1191 &rdev->qp1_sqp->qplib_qp);
1193 dev_err(rdev_to_dev(rdev),
1194 "Failed to create AH entry for ShadowQP");
1199 /* Allocate 128 + 1 more than what's provided */
1200 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1201 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1202 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1203 dev_attr->max_qp_wqes +
1204 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1205 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
		 * Reserve one slot for the phantom WQE. The application can
		 * post one extra entry in this case, but this is allowed to
		 * avoid an unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
1215 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1216 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1218 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1222 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1225 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1227 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1232 qp->ib_qp.qp_num = qp->qplib_qp.id;
1233 spin_lock_init(&qp->sq_lock);
1234 spin_lock_init(&qp->rq_lock);
1237 struct bnxt_re_qp_resp resp;
1239 resp.qpid = qp->ib_qp.qp_num;
1241 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1243 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1247 INIT_LIST_HEAD(&qp->list);
1248 mutex_lock(&rdev->qp_lock);
1249 list_add_tail(&qp->list, &rdev->qp_list);
1250 atomic_inc(&rdev->qp_count);
1251 mutex_unlock(&rdev->qp_lock);
1255 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1261 static u8 __from_ib_qp_state(enum ib_qp_state state)
1265 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1267 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1269 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1271 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1273 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1275 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1278 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1282 static enum ib_qp_state __to_ib_qp_state(u8 state)
1285 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1286 return IB_QPS_RESET;
1287 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1289 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1291 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1293 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1295 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1297 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1303 static u32 __from_ib_mtu(enum ib_mtu mtu)
1307 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1309 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1311 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1313 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1315 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1317 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1321 static enum ib_mtu __to_ib_mtu(u32 mtu)
1323 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1324 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1326 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1328 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1330 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1332 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1339 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1340 struct bnxt_re_qp *qp1_qp,
1343 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1346 if (qp_attr_mask & IB_QP_STATE) {
1347 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1348 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1350 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1351 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1352 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1355 if (qp_attr_mask & IB_QP_QKEY) {
1356 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1357 /* Using a Random QKEY */
1358 qp->qplib_qp.qkey = 0x81818181;
1360 if (qp_attr_mask & IB_QP_SQ_PSN) {
1361 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1362 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1365 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1367 dev_err(rdev_to_dev(rdev),
1368 "Failed to modify Shadow QP for QP1");
1372 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1373 int qp_attr_mask, struct ib_udata *udata)
1375 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1376 struct bnxt_re_dev *rdev = qp->rdev;
1377 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1378 enum ib_qp_state curr_qp_state, new_qp_state;
1382 struct ib_gid_attr sgid_attr;
1385 qp->qplib_qp.modify_flags = 0;
1386 if (qp_attr_mask & IB_QP_STATE) {
1387 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1388 new_qp_state = qp_attr->qp_state;
1389 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1390 ib_qp->qp_type, qp_attr_mask,
1391 IB_LINK_LAYER_ETHERNET)) {
1392 dev_err(rdev_to_dev(rdev),
1393 "Invalid attribute mask: %#x specified ",
1395 dev_err(rdev_to_dev(rdev),
1396 "for qpn: %#x type: %#x",
1397 ib_qp->qp_num, ib_qp->qp_type);
1398 dev_err(rdev_to_dev(rdev),
1399 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1400 curr_qp_state, new_qp_state);
1403 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1404 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1406 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1407 qp->qplib_qp.modify_flags |=
1408 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1409 qp->qplib_qp.en_sqd_async_notify = true;
1411 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1412 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1413 qp->qplib_qp.access =
1414 __from_ib_access_flags(qp_attr->qp_access_flags);
1415 /* LOCAL_WRITE access must be set to allow RC receive */
1416 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1418 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1419 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1420 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1422 if (qp_attr_mask & IB_QP_QKEY) {
1423 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1424 qp->qplib_qp.qkey = qp_attr->qkey;
1426 if (qp_attr_mask & IB_QP_AV) {
1427 const struct ib_global_route *grh =
1428 rdma_ah_read_grh(&qp_attr->ah_attr);
1430 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1431 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1432 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1433 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1434 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1435 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1436 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1437 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1438 sizeof(qp->qplib_qp.ah.dgid.data));
1439 qp->qplib_qp.ah.flow_label = grh->flow_label;
		/* If RoCE V2 is enabled, the stack will have two entries for
		 * each GID entry. Avoid this duplicate entry in HW by dividing
		 * the GID index by 2 for RoCE V2.
1444 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1445 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1446 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1447 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1448 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1449 ether_addr_copy(qp->qplib_qp.ah.dmac,
1450 qp_attr->ah_attr.roce.dmac);
1452 status = ib_get_cached_gid(&rdev->ibdev, 1,
1455 if (!status && sgid_attr.ndev) {
1456 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1458 dev_put(sgid_attr.ndev);
1459 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1462 case RDMA_NETWORK_IPV4:
1463 qp->qplib_qp.nw_type =
1464 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1466 case RDMA_NETWORK_IPV6:
1467 qp->qplib_qp.nw_type =
1468 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1471 qp->qplib_qp.nw_type =
1472 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1478 if (qp_attr_mask & IB_QP_PATH_MTU) {
1479 qp->qplib_qp.modify_flags |=
1480 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1481 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1482 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1483 qp->qplib_qp.modify_flags |=
1484 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1485 qp->qplib_qp.path_mtu =
1486 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1489 if (qp_attr_mask & IB_QP_TIMEOUT) {
1490 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1491 qp->qplib_qp.timeout = qp_attr->timeout;
1493 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1494 qp->qplib_qp.modify_flags |=
1495 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1496 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1498 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1499 qp->qplib_qp.modify_flags |=
1500 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1501 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1503 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1504 qp->qplib_qp.modify_flags |=
1505 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1506 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1508 if (qp_attr_mask & IB_QP_RQ_PSN) {
1509 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1510 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1512 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1513 qp->qplib_qp.modify_flags |=
1514 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1515 qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
1517 if (qp_attr_mask & IB_QP_SQ_PSN) {
1518 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1519 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1521 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1522 qp->qplib_qp.modify_flags |=
1523 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1524 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1526 if (qp_attr_mask & IB_QP_CAP) {
1527 qp->qplib_qp.modify_flags |=
1528 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1529 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1530 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1531 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1532 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1533 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1534 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1535 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1536 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1537 (qp_attr->cap.max_inline_data >=
1538 dev_attr->max_inline_data)) {
1539 dev_err(rdev_to_dev(rdev),
1540 "Create QP failed - max exceeded");
1543 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1544 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1545 dev_attr->max_qp_wqes + 1);
1546 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1547 qp_attr->cap.max_send_wr;
		 * Reserve one slot for the phantom WQE. Some applications can
		 * post one extra entry in this case, but this is allowed to
		 * avoid an unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
1554 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1555 if (qp->qplib_qp.rq.max_wqe) {
1556 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1557 qp->qplib_qp.rq.max_wqe =
1558 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1559 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1560 qp_attr->cap.max_recv_wr;
1561 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1563 /* SRQ was used prior, just ignore the RQ caps */
1566 if (qp_attr_mask & IB_QP_DEST_QPN) {
1567 qp->qplib_qp.modify_flags |=
1568 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1569 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1571 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1573 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1576 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1577 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1581 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1582 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1584 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1585 struct bnxt_re_dev *rdev = qp->rdev;
1586 struct bnxt_qplib_qp qplib_qp;
1589 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1590 qplib_qp.id = qp->qplib_qp.id;
1591 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1593 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1595 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1598 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1599 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1600 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1601 qp_attr->pkey_index = qplib_qp.pkey_index;
1602 qp_attr->qkey = qplib_qp.qkey;
1603 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1604 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
1605 qplib_qp.ah.host_sgid_index,
1606 qplib_qp.ah.hop_limit,
1607 qplib_qp.ah.traffic_class);
1608 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
1609 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
1610 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
1611 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1612 qp_attr->timeout = qplib_qp.timeout;
1613 qp_attr->retry_cnt = qplib_qp.retry_cnt;
1614 qp_attr->rnr_retry = qplib_qp.rnr_retry;
1615 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1616 qp_attr->rq_psn = qplib_qp.rq.psn;
1617 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1618 qp_attr->sq_psn = qplib_qp.sq.psn;
1619 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1620 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1622 qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1624 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1625 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1626 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1627 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1628 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1629 qp_init_attr->cap = qp_attr->cap;
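/* QP1 (GSI) packets are sent with their headers built in software: the
 * L2/GRH or IP+UDP, BTH and DETH headers are assembled with
 * ib_ud_header_init() and ib_ud_header_pack() into the QP1 SQ header buffer,
 * which is then prepended to the WQE as sg_list[0] (the caller-supplied SGEs
 * are shifted up by one slot).
 */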
/* Routine for sending QP1 packets for RoCE V1 and V2
 */
1636 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1637 struct ib_send_wr *wr,
1638 struct bnxt_qplib_swqe *wqe,
1641 struct ib_device *ibdev = &qp->rdev->ibdev;
1642 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1644 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1645 struct bnxt_qplib_sge sge;
1649 struct ib_gid_attr sgid_attr;
1651 bool is_eth = false;
1652 bool is_vlan = false;
1653 bool is_grh = false;
1654 bool is_udp = false;
1656 u16 vlan_id = 0xFFFF;
1658 int i, rc = 0, size;
1660 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1662 rc = ib_get_cached_gid(ibdev, 1,
1663 qplib_ah->host_sgid_index, &sgid,
1666 dev_err(rdev_to_dev(qp->rdev),
1667 "Failed to query gid at index %d",
1668 qplib_ah->host_sgid_index);
1671 if (sgid_attr.ndev) {
1672 if (is_vlan_dev(sgid_attr.ndev))
1673 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1674 dev_put(sgid_attr.ndev);
1676 /* Get network header type for this GID */
1677 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1679 case RDMA_NETWORK_IPV4:
1680 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1682 case RDMA_NETWORK_IPV6:
1683 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1686 nw_type = BNXT_RE_ROCE_V1_PACKET;
1689 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1690 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1692 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1694 ether_type = ETH_P_IP;
1697 ether_type = ETH_P_IPV6;
1701 ether_type = ETH_P_IBOE;
1706 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1708 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1709 ip_version, is_udp, 0, &qp->qp1_hdr);
1712 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1713 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1715 /* For vlan, check the sgid for vlan existence */
1718 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1720 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1721 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1724 if (is_grh || (ip_version == 6)) {
1725 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1726 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1728 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1731 if (ip_version == 4) {
1732 qp->qp1_hdr.ip4.tos = 0;
1733 qp->qp1_hdr.ip4.id = 0;
1734 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1735 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1737 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1738 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1739 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1743 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1744 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1745 qp->qp1_hdr.udp.csum = 0;
1749 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1750 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1751 qp->qp1_hdr.immediate_present = 1;
1753 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1755 if (wr->send_flags & IB_SEND_SOLICITED)
1756 qp->qp1_hdr.bth.solicited_event = 1;
1758 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1760 /* P_key for QP1 is for all members */
1761 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1762 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1763 qp->qp1_hdr.bth.ack_req = 0;
1765 qp->send_psn &= BTH_PSN_MASK;
1766 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
	/* Use the privileged Q_Key for QP1 */
1769 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1770 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1772 /* Pack the QP1 to the transmit buffer */
1773 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1775 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
1776 for (i = wqe->num_sge; i; i--) {
1777 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1778 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1779 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1783 * Max Header buf size for IPV6 RoCE V2 is 86,
1784 * which is same as the QP1 SQ header buffer.
1785 * Header buf size for IPV4 RoCE V2 can be 66.
1786 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
1787 * Subtract 20 bytes from QP1 SQ header buf size
1789 if (is_udp && ip_version == 4)
1792 * Max Header buf size for RoCE V1 is 78.
1793 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1794 * Subtract 8 bytes from QP1 SQ header buf size
1799 /* Subtract 4 bytes for non vlan packets */
1803 wqe->sg_list[0].addr = sge.addr;
1804 wqe->sg_list[0].lkey = sge.lkey;
1805 wqe->sg_list[0].size = sge.size;
1809 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
/* For the MAD layer, it only provides the recv SGE the size of
 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
 * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
 * receive packet (334 bytes) with no VLAN and then copy the GRH
 * and the MAD datagram out to the provided SGE.
 */
1821 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1822 struct ib_recv_wr *wr,
1823 struct bnxt_qplib_swqe *wqe,
1826 struct bnxt_qplib_sge ref, sge;
1828 struct bnxt_re_sqp_entries *sqp_entry;
1830 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1832 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
	/* Create 1 SGE to receive the entire
	 * ethernet packet
	 */
1838 /* Save the reference from ULP */
1839 ref.addr = wqe->sg_list[0].addr;
1840 ref.lkey = wqe->sg_list[0].lkey;
1841 ref.size = wqe->sg_list[0].size;
1843 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1846 wqe->sg_list[0].addr = sge.addr;
1847 wqe->sg_list[0].lkey = sge.lkey;
1848 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1849 sge.size -= wqe->sg_list[0].size;
1851 sqp_entry->sge.addr = ref.addr;
1852 sqp_entry->sge.lkey = ref.lkey;
1853 sqp_entry->sge.size = ref.size;
1854 /* Store the wrid for reporting completion */
1855 sqp_entry->wrid = wqe->wr_id;
1856 /* change the wqe->wrid to table index */
1857 wqe->wr_id = rq_prod_index;
1861 static int is_ud_qp(struct bnxt_re_qp *qp)
1863 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
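/* Translate an IB send work request into a qplib SWQE. For UD QPs the
 * destination QPN, Q_Key and AH id (AVID) are taken from the ud_wr().
 */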
1866 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1867 struct ib_send_wr *wr,
1868 struct bnxt_qplib_swqe *wqe)
1870 struct bnxt_re_ah *ah = NULL;
1873 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1874 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1875 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1876 wqe->send.avid = ah->qplib_ah.id;
1878 switch (wr->opcode) {
1880 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1882 case IB_WR_SEND_WITH_IMM:
1883 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1884 wqe->send.imm_data = wr->ex.imm_data;
1886 case IB_WR_SEND_WITH_INV:
1887 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1888 wqe->send.inv_key = wr->ex.invalidate_rkey;
1893 if (wr->send_flags & IB_SEND_SIGNALED)
1894 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1895 if (wr->send_flags & IB_SEND_FENCE)
1896 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1897 if (wr->send_flags & IB_SEND_SOLICITED)
1898 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1899 if (wr->send_flags & IB_SEND_INLINE)
1900 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1905 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1906 struct bnxt_qplib_swqe *wqe)
1908 switch (wr->opcode) {
1909 case IB_WR_RDMA_WRITE:
1910 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1912 case IB_WR_RDMA_WRITE_WITH_IMM:
1913 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1914 wqe->rdma.imm_data = wr->ex.imm_data;
1916 case IB_WR_RDMA_READ:
1917 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1918 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1923 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1924 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1925 if (wr->send_flags & IB_SEND_SIGNALED)
1926 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1927 if (wr->send_flags & IB_SEND_FENCE)
1928 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1929 if (wr->send_flags & IB_SEND_SOLICITED)
1930 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1931 if (wr->send_flags & IB_SEND_INLINE)
1932 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1937 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1938 struct bnxt_qplib_swqe *wqe)
1940 switch (wr->opcode) {
1941 case IB_WR_ATOMIC_CMP_AND_SWP:
1942 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1943 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1945 case IB_WR_ATOMIC_FETCH_AND_ADD:
1946 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1947 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1952 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1953 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1954 if (wr->send_flags & IB_SEND_SIGNALED)
1955 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1956 if (wr->send_flags & IB_SEND_FENCE)
1957 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1958 if (wr->send_flags & IB_SEND_SOLICITED)
1959 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1963 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1964 struct bnxt_qplib_swqe *wqe)
1966 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1967 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1969 if (wr->send_flags & IB_SEND_SIGNALED)
1970 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1971 if (wr->send_flags & IB_SEND_FENCE)
1972 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1973 if (wr->send_flags & IB_SEND_SOLICITED)
1974 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1979 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1980 struct bnxt_qplib_swqe *wqe)
1982 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1983 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1984 int access = wr->access;
1986 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1987 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1988 wqe->frmr.page_list = mr->pages;
1989 wqe->frmr.page_list_len = mr->npages;
1990 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1991 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1993 if (wr->wr.send_flags & IB_SEND_FENCE)
1994 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1995 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1996 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1998 if (access & IB_ACCESS_LOCAL_WRITE)
1999 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2000 if (access & IB_ACCESS_REMOTE_READ)
2001 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2002 if (access & IB_ACCESS_REMOTE_WRITE)
2003 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2004 if (access & IB_ACCESS_REMOTE_ATOMIC)
2005 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2006 if (access & IB_ACCESS_MW_BIND)
2007 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2009 wqe->frmr.l_key = wr->key;
2010 wqe->frmr.length = wr->mr->length;
2011 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2012 wqe->frmr.va = wr->mr->iova;
2016 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2017 struct ib_send_wr *wr,
2018 struct bnxt_qplib_swqe *wqe)
2020 /* Copy the inline data to the data field */
2025 in_data = wqe->inline_data;
2026 for (i = 0; i < wr->num_sge; i++) {
2027 sge_addr = (void *)(unsigned long)
2028 wr->sg_list[i].addr;
2029 sge_len = wr->sg_list[i].length;
2031 if ((sge_len + wqe->inline_len) >
2032 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2033 dev_err(rdev_to_dev(rdev),
2034 "Inline data size requested > supported value");
2037 sge_len = wr->sg_list[i].length;
2039 memcpy(in_data, sge_addr, sge_len);
2040 in_data += wr->sg_list[i].length;
2041 wqe->inline_len += wr->sg_list[i].length;
2043 return wqe->inline_len;
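/* Either copy the payload inline into the WQE (IB_SEND_INLINE) or build a
 * scatter/gather list that references the sender's buffers.
 */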
2046 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2047 struct ib_send_wr *wr,
2048 struct bnxt_qplib_swqe *wqe)
2052 if (wr->send_flags & IB_SEND_INLINE)
2053 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2055 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2061 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2063 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2064 qp->ib_qp.qp_type == IB_QPT_GSI ||
2065 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2066 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2068 struct ib_qp_attr qp_attr;
2070 qp_attr_mask = IB_QP_STATE;
2071 qp_attr.qp_state = IB_QPS_RTS;
2072 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2073 qp->qplib_qp.wqe_cnt = 0;
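/* Post a send WR on the shadow QP on behalf of QP1 traffic. The WQE type is
 * forced to SEND and no GSI header is built here.
 */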
2077 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2078 struct bnxt_re_qp *qp,
2079 struct ib_send_wr *wr)
2081 struct bnxt_qplib_swqe wqe;
2082 int rc = 0, payload_sz = 0;
2083 unsigned long flags;
2085 spin_lock_irqsave(&qp->sq_lock, flags);
2086 memset(&wqe, 0, sizeof(wqe));
2089 memset(&wqe, 0, sizeof(wqe));
2092 wqe.num_sge = wr->num_sge;
2093 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2094 dev_err(rdev_to_dev(rdev),
2095 "Limit exceeded for Send SGEs");
2100 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2101 if (payload_sz < 0) {
2105 wqe.wr_id = wr->wr_id;
2107 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2109 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2111 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2114 dev_err(rdev_to_dev(rdev),
2115 "Post send failed opcode = %#x rc = %d",
2121 bnxt_qplib_post_send_db(&qp->qplib_qp);
2122 bnxt_ud_qp_hw_stall_workaround(qp);
2123 spin_unlock_irqrestore(&qp->sq_lock, flags);
int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (ib_qp->qp_type == IB_QPT_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			switch (wr->send_flags) {
			case IB_SEND_IP_CSUM:
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
				break;
			default:
				break;
			}
			/* Fall thru to build the wqe */
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			dev_err(rdev_to_dev(qp->rdev),
				"RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			dev_err(rdev_to_dev(qp->rdev),
				"WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(qp->rdev),
				"post_send failed op:%#x qps = %#x rc = %d\n",
				wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;

	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;
		wr = wr->next;
	}
	bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}

int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}
		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	spin_unlock_irqrestore(&qp->rq_lock, flags);
	return rc;
}

/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_dev *rdev = cq->rdev;
	int rc;

	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
		return rc;
	}
	if (!IS_ERR_OR_NULL(cq->umem))
		ib_umem_release(cq->umem);

	kfree(cq->cql);
	kfree(cq);
	atomic_dec(&rdev->cq_count);
	return 0;
}

struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = NULL;
	int rc, entries;
	int cqe = attr->cqe;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
		return ERR_PTR(-EINVAL);
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	if (context) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = container_of
						(context,
						 struct bnxt_re_ucontext,
						 ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(context, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
		cq->qplib_cq.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
		cq->qplib_cq.sghead = NULL;
		cq->qplib_cq.nmap = 0;
	}
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;

	atomic_inc(&rdev->cq_count);

	if (context) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return &cq->ib_cq;

c2fail:
	if (context)
		ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	kfree(cq);
	return ERR_PTR(rc);
}

static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

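/* Translate a requester-side (SQ) completion into the matching ib_wc opcode
 * and completion flags, then map the HW status through
 * __req_to_ib_wc_status().
 */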
static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}

static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
				     u16 raweth_qp1_flags2)
{
	bool is_udp = false, is_ipv6 = false, is_ipv4 = false;

	/* raweth_qp1_flags Bit 9-6 indicates itype */
	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
		return -1;

	if (raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
	    raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		is_udp = true;
		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
		(raweth_qp1_flags2 &
		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
			(is_ipv6 = true) : (is_ipv4 = true);
		return (is_ipv6 ?
			BNXT_RE_ROCEV2_IPV6_PACKET :
			BNXT_RE_ROCEV2_IPV4_PACKET);
	}

	return BNXT_RE_ROCE_V1_PACKET;
}

static int bnxt_re_to_ib_nw_type(int nw_type)
{
	u8 nw_hdr_type = 0xFF;

	switch (nw_type) {
	case BNXT_RE_ROCE_V1_PACKET:
		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case BNXT_RE_ROCEV2_IPV4_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV4;
		break;
	case BNXT_RE_ROCEV2_IPV6_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV6;
		break;
	}
	return nw_hdr_type;
}

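/* A QP1 frame whose destination MAC does not match the netdev address is
 * treated as a candidate loopback/multicast frame; the Ethertype (and, for
 * IPv4/IPv6, the UDP destination port) then decides whether it is really a
 * looped-back RoCE packet.
 */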
static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
				       void *rq_hdr_buf)
{
	u8 *tmp_buf = NULL;
	struct ethhdr *eth_hdr;
	u16 eth_type;
	bool rc = false;

	tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If dest mac is not same as I/F mac, this could be a
	 * loopback address or multicast address, check whether
	 * it is a loopback packet
	 */
	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
		tmp_buf += 4;
		/* Check the ether type */
		eth_hdr = (struct ethhdr *)tmp_buf;
		eth_type = ntohs(eth_hdr->h_proto);
		switch (eth_type) {
		case ETH_P_IBOE:
			rc = true;
			break;
		case ETH_P_IP:
		case ETH_P_IPV6: {
			u32 len;
			struct udphdr *udp_hdr;

			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
						      sizeof(struct ipv6hdr));
			tmp_buf += sizeof(struct ethhdr) + len;
			udp_hdr = (struct udphdr *)tmp_buf;
			if (ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT)
				rc = true;
			break;
			}
		default:
			break;
		}
	}

	return rc;
}

static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
					 struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp1_qp->rdev;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	struct ib_send_wr *swr;
	struct ib_ud_wr udwr;
	struct ib_recv_wr rwr;
	int pkt_type = 0;
	u32 tbl_idx;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	dma_addr_t shrq_hdr_buf_map;
	u32 offset = 0;
	u32 skip_bytes = 0;
	struct ib_sge s_sge[2];
	struct ib_sge r_sge[2];
	int rc;

	memset(&udwr, 0, sizeof(udwr));
	memset(&rwr, 0, sizeof(rwr));
	memset(&s_sge, 0, sizeof(s_sge));
	memset(&r_sge, 0, sizeof(r_sge));

	swr = &udwr.wr;
	tbl_idx = cqe->wr_id;

	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
							  tbl_idx);

	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
							    tbl_idx);
	sqp_entry = &rdev->sqp_tbl[tbl_idx];

	/* Store this cqe */
	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
	sqp_entry->qp1_qp = qp1_qp;

	/* Find packet type from the cqe */
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
		return -EINVAL;
	}

	/* Adjust the offset for the user buffer and post in the rq */
	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
		offset = 20;

	/*
	 * QP1 loopback packet has 4 bytes of internal header before
	 * ether header. Skip these four bytes.
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* First send SGE . Skip the ether header*/
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second Send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */
	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to post Rx buffers to shadow QP");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;

	udwr.ah = &rdev->sqp_ah->ib_ah;
	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;

	/* post data received in the send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);

	return 0;
}

static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}

static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_re_qp *qp1_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	u32 tbl_idx;
	int nw_type;

	tbl_idx = cqe->wr_id;

	sqp_entry = &rdev->sqp_tbl[tbl_idx];
	qp1_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	wc->qp = &qp1_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	wc->port_num = 1;
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if (nw_type >= 0) {
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}

static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

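/* Post a fence memory-window bind on the SQ when the poll logic has set
 * sq->send_phantom; this extra "phantom" WQE presumably forces the hardware
 * to emit the completion the poll path is waiting for.
 */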
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			lib_qp->id, lib_qp->sq.hwq.prod,
			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

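/* Poll up to min(num_entries, cq->max_cql) CQEs into the cql scratch array
 * and transcribe them into ib_wc entries, with special handling for
 * completions that belong to the GSI QP and its shadow QP.
 */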
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	if (!cq->cql) {
		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					dev_err(rdev_to_dev(cq->rdev),
						"Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of((struct bnxt_qplib_qp *)
					  (unsigned long)(cqe->qp_handle),
					  struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : bad QP handle");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
					cqe->status = -1;
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					if (cqe->status) {
						continue;
					} else {
						bnxt_re_process_res_shadow_qp_wc
								(qp, wc, cqe);
						break;
					}
				}
				bnxt_re_process_res_ud_wc(wc, cqe);
				break;
			default:
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : type 0x%x not handled",
					cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}

int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0;

	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBR_DBR_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBR_DBR_TYPE_CQ_ARMSE;

	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

	return 0;
}

/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u64 pbl = 0;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;
	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}

int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->npages && mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	if (!IS_ERR_OR_NULL(mr->ib_umem))
		ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}

static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;
	mr->pages[mr->npages++] = addr;
	return 0;
}

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}

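/* Fast-registration MR allocation: only IB_MR_TYPE_MEM_REG is supported and
 * the page list is limited to MAX_PBL_LVL_1_PGS entries; both the driver-side
 * page array and the HW fast-reg page list are allocated here.
 */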
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail_mr;
	}
	mr->npages = 0;

	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr->pages);
	kfree(mr);
	return ERR_PTR(rc);
}

struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;
	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;
	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
		return rc;
	}
	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}

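/* User-memory registration: pin the region with ib_umem_get(), flatten the
 * umem scatterlist into a page-buffer list (PBL) of DMA addresses, and
 * register that list with the firmware.
 */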
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl, *pbl_tbl_orig;
	int i, umem_pgs, pages, rc;
	struct scatterlist *sg;
	int entry;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	umem = ib_umem_get(ib_pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "Failed to get umem");
		rc = -EFAULT;
		goto free_mr;
	}
	mr->ib_umem = umem;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
		goto release_umem;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;
	mr->qplib_mr.va = virt_addr;

	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!");
		rc = -EINVAL;
		goto free_mrw;
	}
	mr->qplib_mr.total_size = length;

	pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_mrw;
	}
	pbl_tbl_orig = pbl_tbl;

	if (umem->hugetlb) {
		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
		rc = -EFAULT;
		goto fail;
	}
	if (umem->page_shift != PAGE_SHIFT) {
		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
		rc = -EFAULT;
		goto fail;
	}

	/* Map umem buf ptrs to the PBL */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> umem->page_shift;
		for (i = 0; i < pages; i++, pbl_tbl++)
			*pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
	}
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
			       umem_pgs, false);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
		goto fail;
	}
	kfree(pbl_tbl_orig);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail:
	kfree(pbl_tbl_orig);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
release_umem:
	ib_umem_release(umem);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}

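/* Per-process ucontext setup: enforce the user ABI version, allocate the page
 * shared with userspace, and return device limits (QP count, CQE size, page
 * size, max CQ depth) through udata.
 */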
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_uctx_resp resp;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
		ibdev->uverbs_abi_ver);
	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
			BNXT_RE_ABI_VERSION);
		return ERR_PTR(-EPERM);
	}

	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
	if (!uctx)
		return ERR_PTR(-ENOMEM);

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return &uctx->ib_uctx;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	kfree(uctx);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	int rc = 0;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
					    &rdev->qplib_res.dpi_tbl,
					    &uctx->dpi);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
			/* Don't fail, continue*/
		uctx->dpi.dbr = NULL;
	}

	kfree(uctx);
	return 0;
}

/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev),
				"Failed to map shared page");