1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
47 #include <linux/qed/qed_if.h>
50 #include <rdma/qedr-abi.h>
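/* Convert a PWM doorbell offset into the byte offset used within the
 * doorbell BAR (the result is later added to dev->db_addr).
 */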
53 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
57 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
60 *pkey = QEDR_ROCE_PKEY_DEFAULT;
64 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
67 struct qedr_dev *dev = get_qedr_dev(ibdev);
70 if (!rdma_cap_roce_gid_table(ibdev, port))
73 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
75 memcpy(sgid, &zgid, sizeof(*sgid));
79 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80 sgid->global.interface_id, sgid->global.subnet_prefix);
85 int qedr_add_gid(struct ib_device *device, u8 port_num,
86 unsigned int index, const union ib_gid *gid,
87 const struct ib_gid_attr *attr, void **context)
89 if (!rdma_cap_roce_gid_table(device, port_num))
92 if (port_num > QEDR_MAX_PORT)
101 int qedr_del_gid(struct ib_device *device, u8 port_num,
102 unsigned int index, void **context)
104 if (!rdma_cap_roce_gid_table(device, port_num))
107 if (port_num > QEDR_MAX_PORT)
116 int qedr_query_device(struct ib_device *ibdev,
117 struct ib_device_attr *attr, struct ib_udata *udata)
119 struct qedr_dev *dev = get_qedr_dev(ibdev);
120 struct qedr_device_attr *qattr = &dev->attr;
122 if (!dev->rdma_ctx) {
124 "qedr_query_device called with invalid params rdma_ctx=%p\n",
129 memset(attr, 0, sizeof(*attr));
131 attr->fw_ver = qattr->fw_ver;
132 attr->sys_image_guid = qattr->sys_image_guid;
133 attr->max_mr_size = qattr->max_mr_size;
134 attr->page_size_cap = qattr->page_size_caps;
135 attr->vendor_id = qattr->vendor_id;
136 attr->vendor_part_id = qattr->vendor_part_id;
137 attr->hw_ver = qattr->hw_ver;
138 attr->max_qp = qattr->max_qp;
139 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141 IB_DEVICE_RC_RNR_NAK_GEN |
142 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
144 attr->max_sge = qattr->max_sge;
145 attr->max_sge_rd = qattr->max_sge;
146 attr->max_cq = qattr->max_cq;
147 attr->max_cqe = qattr->max_cqe;
148 attr->max_mr = qattr->max_mr;
149 attr->max_mw = qattr->max_mw;
150 attr->max_pd = qattr->max_pd;
151 attr->atomic_cap = dev->atomic_cap;
152 attr->max_fmr = qattr->max_fmr;
153 attr->max_map_per_fmr = 16;
154 attr->max_qp_init_rd_atom =
155 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156 attr->max_qp_rd_atom =
157 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158 attr->max_qp_init_rd_atom);
160 attr->max_srq = qattr->max_srq;
161 attr->max_srq_sge = qattr->max_srq_sge;
162 attr->max_srq_wr = qattr->max_srq_wr;
164 attr->local_ca_ack_delay = qattr->dev_ack_delay;
165 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167 attr->max_ah = qattr->max_ah;
172 #define QEDR_SPEED_SDR (1)
173 #define QEDR_SPEED_DDR (2)
174 #define QEDR_SPEED_QDR (4)
175 #define QEDR_SPEED_FDR10 (8)
176 #define QEDR_SPEED_FDR (16)
177 #define QEDR_SPEED_EDR (32)
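/* Translate the link speed reported by the qed core (in Mbps) into the
 * closest IB speed/width pair; unrecognized speeds fall back to SDR x1.
 */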
179 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
184 *ib_speed = QEDR_SPEED_SDR;
185 *ib_width = IB_WIDTH_1X;
188 *ib_speed = QEDR_SPEED_QDR;
189 *ib_width = IB_WIDTH_1X;
193 *ib_speed = QEDR_SPEED_DDR;
194 *ib_width = IB_WIDTH_4X;
198 *ib_speed = QEDR_SPEED_EDR;
199 *ib_width = IB_WIDTH_1X;
203 *ib_speed = QEDR_SPEED_QDR;
204 *ib_width = IB_WIDTH_4X;
208 *ib_speed = QEDR_SPEED_QDR;
209 *ib_width = IB_WIDTH_4X;
213 *ib_speed = QEDR_SPEED_EDR;
214 *ib_width = IB_WIDTH_4X;
219 *ib_speed = QEDR_SPEED_SDR;
220 *ib_width = IB_WIDTH_1X;
224 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
226 struct qedr_dev *dev;
227 struct qed_rdma_port *rdma_port;
229 dev = get_qedr_dev(ibdev);
231 DP_ERR(dev, "invalid_port=0x%x\n", port);
235 if (!dev->rdma_ctx) {
236 DP_ERR(dev, "rdma_ctx is NULL\n");
240 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
242 /* *attr being zeroed by the caller, avoid zeroing it here */
243 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244 attr->state = IB_PORT_ACTIVE;
245 attr->phys_state = 5;
247 attr->state = IB_PORT_DOWN;
248 attr->phys_state = 3;
250 attr->max_mtu = IB_MTU_4096;
251 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
256 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257 attr->gid_tbl_len = QEDR_MAX_SGID;
258 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260 attr->qkey_viol_cntr = 0;
261 get_link_speed_and_width(rdma_port->link_speed,
262 &attr->active_speed, &attr->active_width);
263 attr->max_msg_sz = rdma_port->max_msg_size;
264 attr->max_vl_num = 4;
269 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270 struct ib_port_modify *props)
272 struct qedr_dev *dev;
274 dev = get_qedr_dev(ibdev);
276 DP_ERR(dev, "invalid_port=0x%x\n", port);
283 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
288 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
292 mm->key.phy_addr = phy_addr;
293 /* This function might be called with a length which is not a multiple
294 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295 * forces this granularity by increasing the requested size if needed.
296 * When qedr_mmap is called, it will search the list with the updated
297 * length as a key. To prevent search failures, the length is rounded up
298 * in advance to PAGE_SIZE.
300 mm->key.len = roundup(len, PAGE_SIZE);
301 INIT_LIST_HEAD(&mm->entry);
303 mutex_lock(&uctx->mm_list_lock);
304 list_add(&mm->entry, &uctx->mm_head);
305 mutex_unlock(&uctx->mm_list_lock);
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309 (unsigned long long)mm->key.phy_addr,
310 (unsigned long)mm->key.len, uctx);
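/* Return true if the (phy_addr, len) pair was previously registered for this
 * context via qedr_add_mmap().
 */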
315 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
321 mutex_lock(&uctx->mm_list_lock);
322 list_for_each_entry(mm, &uctx->mm_head, entry) {
323 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
329 mutex_unlock(&uctx->mm_list_lock);
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332 mm->key.phy_addr, mm->key.len, uctx, found);
337 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338 struct ib_udata *udata)
341 struct qedr_ucontext *ctx;
342 struct qedr_alloc_ucontext_resp uresp;
343 struct qedr_dev *dev = get_qedr_dev(ibdev);
344 struct qed_rdma_add_user_out_params oparams;
347 return ERR_PTR(-EFAULT);
349 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
351 return ERR_PTR(-ENOMEM);
353 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
356 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
361 ctx->dpi = oparams.dpi;
362 ctx->dpi_addr = oparams.dpi_addr;
363 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364 ctx->dpi_size = oparams.dpi_size;
365 INIT_LIST_HEAD(&ctx->mm_head);
366 mutex_init(&ctx->mm_list_lock);
368 memset(&uresp, 0, sizeof(uresp));
370 uresp.db_pa = ctx->dpi_phys_addr;
371 uresp.db_size = ctx->dpi_size;
372 uresp.max_send_wr = dev->attr.max_sqe;
373 uresp.max_recv_wr = dev->attr.max_rqe;
374 uresp.max_srq_wr = dev->attr.max_srq_wr;
375 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378 uresp.max_cqes = QEDR_MAX_CQES;
380 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
386 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
390 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
392 return &ctx->ibucontext;
399 int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
401 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402 struct qedr_mm *mm, *tmp;
405 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
407 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
409 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412 mm->key.phy_addr, mm->key.len, uctx);
413 list_del(&mm->entry);
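/* mmap() handler for user contexts. Only offsets previously registered via
 * qedr_add_mmap() may be mapped; the doorbell BAR is mapped write-combined
 * and must not be mapped for read, anything else is mapped as chain memory.
 */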
421 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
423 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424 struct qedr_dev *dev = get_qedr_dev(context->device);
425 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426 u64 unmapped_db = dev->db_phys_addr;
427 unsigned long len = (vma->vm_end - vma->vm_start);
431 DP_DEBUG(dev, QEDR_MSG_INIT,
432 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434 if (vma->vm_start & (PAGE_SIZE - 1)) {
435 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
440 found = qedr_search_mmap(ucontext, vm_page, len);
442 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
447 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
449 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
451 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452 if (vma->vm_flags & VM_READ) {
453 DP_ERR(dev, "Trying to map doorbell bar for read\n");
457 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
459 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460 PAGE_SIZE, vma->vm_page_prot);
462 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463 rc = remap_pfn_range(vma, vma->vm_start,
464 vma->vm_pgoff, len, vma->vm_page_prot);
466 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
470 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata)
473 struct qedr_dev *dev = get_qedr_dev(ibdev);
478 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
479 (udata && context) ? "User Lib" : "Kernel");
481 if (!dev->rdma_ctx) {
482 DP_ERR(dev, "invlaid RDMA context\n");
483 return ERR_PTR(-EINVAL);
486 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
488 return ERR_PTR(-ENOMEM);
490 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
496 if (udata && context) {
497 struct qedr_alloc_pd_uresp uresp;
501 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
503 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
504 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
508 pd->uctx = get_qedr_ucontext(context);
519 int qedr_dealloc_pd(struct ib_pd *ibpd)
521 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
522 struct qedr_pd *pd = get_qedr_pd(ibpd);
525 pr_err("Invalid PD received in dealloc_pd\n");
529 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
530 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
537 static void qedr_free_pbl(struct qedr_dev *dev,
538 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
540 struct pci_dev *pdev = dev->pdev;
543 for (i = 0; i < pbl_info->num_pbls; i++) {
546 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
547 pbl[i].va, pbl[i].pa);
553 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
554 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
556 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
557 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
558 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
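/* A PBL (page buffer list) describes the pages backing a queue or MR. With a
 * single layer, one PBL page lists all of the data pages; with two layers,
 * the first PBL page holds the addresses of the remaining PBL pages, which in
 * turn hold the data page addresses.
 */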
560 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
561 struct qedr_pbl_info *pbl_info,
564 struct pci_dev *pdev = dev->pdev;
565 struct qedr_pbl *pbl_table;
566 dma_addr_t *pbl_main_tbl;
571 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
573 return ERR_PTR(-ENOMEM);
575 for (i = 0; i < pbl_info->num_pbls; i++) {
576 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
581 memset(va, 0, pbl_info->pbl_size);
582 pbl_table[i].va = va;
583 pbl_table[i].pa = pa;
586 /* Two-Layer PBLs, if we have more than one pbl we need to initialize
587 * the first one with physical pointers to all of the rest
589 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
590 for (i = 0; i < pbl_info->num_pbls - 1; i++)
591 pbl_main_tbl[i] = pbl_table[i + 1].pa;
596 for (i--; i >= 0; i--)
597 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
598 pbl_table[i].va, pbl_table[i].pa);
600 qedr_free_pbl(dev, pbl_info, pbl_table);
602 return ERR_PTR(-ENOMEM);
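/* Work out how many PBL pages are needed for num_pbes entries and how large
 * each PBL page must be, switching to a two-layer layout when a single page
 * cannot hold them all (and the caller allows it).
 */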
605 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
606 struct qedr_pbl_info *pbl_info,
607 u32 num_pbes, int two_layer_capable)
613 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
614 if (num_pbes > MAX_PBES_TWO_LAYER) {
615 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
620 /* calculate required pbl page size */
621 pbl_size = MIN_FW_PBL_PAGE_SIZE;
622 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
623 NUM_PBES_ON_PAGE(pbl_size);
625 while (pbl_capacity < num_pbes) {
627 pbl_capacity = pbl_size / sizeof(u64);
628 pbl_capacity = pbl_capacity * pbl_capacity;
631 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
632 num_pbls++; /* One for layer 0 (points to the pbls) */
633 pbl_info->two_layered = true;
635 /* One layered PBL */
637 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
638 roundup_pow_of_two((num_pbes * sizeof(u64))));
639 pbl_info->two_layered = false;
642 pbl_info->num_pbls = num_pbls;
643 pbl_info->pbl_size = pbl_size;
644 pbl_info->num_pbes = num_pbes;
646 DP_DEBUG(dev, QEDR_MSG_MR,
647 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
648 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
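/* Walk the umem scatterlist and write every page's DMA address into the PBEs,
 * moving on to the next PBL page whenever the current one fills up.
 */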
653 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
654 struct qedr_pbl *pbl,
655 struct qedr_pbl_info *pbl_info)
657 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
658 struct qedr_pbl *pbl_tbl;
659 struct scatterlist *sg;
664 if (!pbl_info->num_pbes)
667 /* If we have a two-layered pbl, the first pbl points to the rest
668 * of the pbls and the first entry lies in the second pbl of the table
670 if (pbl_info->two_layered)
675 pbe = (struct regpair *)pbl_tbl->va;
677 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
683 shift = ilog2(umem->page_size);
685 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
686 pages = sg_dma_len(sg) >> shift;
687 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
688 /* store the page address in pbe */
689 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
690 umem->page_size * pg_cnt);
691 addr = upper_32_bits(sg_dma_address(sg) +
692 umem->page_size * pg_cnt);
693 pbe->hi = cpu_to_le32(addr);
698 if (total_num_pbes == pbl_info->num_pbes)
701 /* If the given pbl is full storing the pbes, move to the next pbl. */
704 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
706 pbe = (struct regpair *)pbl_tbl->va;
713 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
714 struct qedr_cq *cq, struct ib_udata *udata)
716 struct qedr_create_cq_uresp uresp;
719 memset(&uresp, 0, sizeof(uresp));
721 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
722 uresp.icid = cq->icid;
724 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
726 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
731 static void consume_cqe(struct qedr_cq *cq)
733 if (cq->latest_cqe == cq->toggle_cqe)
734 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
736 cq->latest_cqe = qed_chain_consume(&cq->pbl);
739 static inline int qedr_align_cq_entries(int entries)
741 u64 size, aligned_size;
743 /* We allocate an extra entry that we don't report to the FW. */
744 size = (entries + 1) * QEDR_CQE_SIZE;
745 aligned_size = ALIGN(size, PAGE_SIZE);
747 return aligned_size / QEDR_CQE_SIZE;
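/* Pin a queue buffer supplied by user space with ib_umem_get() and build the
 * PBL describing its pages, so the physical layout can be handed to the qed
 * core.
 */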
750 static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
751 struct qedr_dev *dev,
752 struct qedr_userq *q,
753 u64 buf_addr, size_t buf_len,
754 int access, int dmasync)
759 q->buf_addr = buf_addr;
760 q->buf_len = buf_len;
761 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
762 if (IS_ERR(q->umem)) {
763 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
765 return PTR_ERR(q->umem);
768 page_cnt = ib_umem_page_count(q->umem);
769 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
773 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
774 if (IS_ERR(q->pbl_tbl)) {
775 rc = PTR_ERR(q->pbl_tbl);
779 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
784 ib_umem_release(q->umem);
789 static inline void qedr_init_cq_params(struct qedr_cq *cq,
790 struct qedr_ucontext *ctx,
791 struct qedr_dev *dev, int vector,
792 int chain_entries, int page_cnt,
794 struct qed_rdma_create_cq_in_params
797 memset(params, 0, sizeof(*params));
798 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
799 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
800 params->cnq_id = vector;
801 params->cq_size = chain_entries - 1;
802 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
803 params->pbl_num_pages = page_cnt;
804 params->pbl_ptr = pbl_ptr;
805 params->pbl_two_level = 0;
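/* Ring the CQ doorbell: publish the latest consumer index together with the
 * aggregation/arm flags.
 */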
808 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
810 /* Flush data before signalling doorbell */
812 cq->db.data.agg_flags = flags;
813 cq->db.data.value = cpu_to_le32(cons);
814 writeq(cq->db.raw, cq->db_addr);
816 /* Make sure write would stick */
820 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
822 struct qedr_cq *cq = get_qedr_cq(ibcq);
823 unsigned long sflags;
825 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
828 spin_lock_irqsave(&cq->cq_lock, sflags);
832 if (flags & IB_CQ_SOLICITED)
833 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
835 if (flags & IB_CQ_NEXT_COMP)
836 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
838 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
840 spin_unlock_irqrestore(&cq->cq_lock, sflags);
845 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
846 const struct ib_cq_init_attr *attr,
847 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
849 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
850 struct qed_rdma_destroy_cq_out_params destroy_oparams;
851 struct qed_rdma_destroy_cq_in_params destroy_iparams;
852 struct qedr_dev *dev = get_qedr_dev(ibdev);
853 struct qed_rdma_create_cq_in_params params;
854 struct qedr_create_cq_ureq ureq;
855 int vector = attr->comp_vector;
856 int entries = attr->cqe;
864 DP_DEBUG(dev, QEDR_MSG_INIT,
865 "create_cq: called from %s. entries=%d, vector=%d\n",
866 udata ? "User Lib" : "Kernel", entries, vector);
868 if (entries > QEDR_MAX_CQES) {
870 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
871 entries, QEDR_MAX_CQES);
872 return ERR_PTR(-EINVAL);
875 chain_entries = qedr_align_cq_entries(entries);
876 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
878 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
880 return ERR_PTR(-ENOMEM);
883 memset(&ureq, 0, sizeof(ureq));
884 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
886 "create cq: problem copying data from user space\n");
892 "create cq: cannot create a cq with 0 entries\n");
896 cq->cq_type = QEDR_CQ_TYPE_USER;
898 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
899 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
903 pbl_ptr = cq->q.pbl_tbl->pa;
904 page_cnt = cq->q.pbl_info.num_pbes;
906 cq->ibcq.cqe = chain_entries;
908 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
910 rc = dev->ops->common->chain_alloc(dev->cdev,
911 QED_CHAIN_USE_TO_CONSUME,
913 QED_CHAIN_CNT_TYPE_U32,
915 sizeof(union rdma_cqe),
920 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
921 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
922 cq->ibcq.cqe = cq->pbl.capacity;
925 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
928 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, ¶ms, &icid);
933 cq->sig = QEDR_CQ_MAGIC_NUMBER;
934 spin_lock_init(&cq->cq_lock);
937 rc = qedr_copy_cq_uresp(dev, cq, udata);
941 /* Generate doorbell address. */
942 cq->db_addr = dev->db_addr +
943 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
944 cq->db.data.icid = cq->icid;
945 cq->db.data.params = DB_AGG_CMD_SET <<
946 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
948 /* Point to the very last element; once we pass it, we toggle. */
949 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
950 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
951 cq->latest_cqe = NULL;
953 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
956 DP_DEBUG(dev, QEDR_MSG_CQ,
957 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
958 cq->icid, cq, params.cq_size);
963 destroy_iparams.icid = cq->icid;
964 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
968 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
970 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
973 ib_umem_release(cq->q.umem);
976 return ERR_PTR(-EINVAL);
979 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
981 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
982 struct qedr_cq *cq = get_qedr_cq(ibcq);
984 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
989 int qedr_destroy_cq(struct ib_cq *ibcq)
991 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
992 struct qed_rdma_destroy_cq_out_params oparams;
993 struct qed_rdma_destroy_cq_in_params iparams;
994 struct qedr_cq *cq = get_qedr_cq(ibcq);
996 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
998 /* GSI CQs are handled by the driver, so they don't exist in the FW */
999 if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
1002 iparams.icid = cq->icid;
1003 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
1007 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1010 if (ibcq->uobject && ibcq->uobject->context) {
1011 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1012 ib_umem_release(cq->q.umem);
1020 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1021 struct ib_qp_attr *attr,
1023 struct qed_rdma_modify_qp_in_params
1026 enum rdma_network_type nw_type;
1027 struct ib_gid_attr gid_attr;
1033 rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1034 attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1038 if (!memcmp(&gid, &zgid, sizeof(gid)))
1041 if (gid_attr.ndev) {
1042 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1044 dev_put(gid_attr.ndev);
1045 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1047 case RDMA_NETWORK_IPV6:
1048 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1049 sizeof(qp_params->sgid));
1050 memcpy(&qp_params->dgid.bytes[0],
1051 &attr->ah_attr.grh.dgid,
1052 sizeof(qp_params->dgid));
1053 qp_params->roce_mode = ROCE_V2_IPV6;
1054 SET_FIELD(qp_params->modify_flags,
1055 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1057 case RDMA_NETWORK_IB:
1058 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1059 sizeof(qp_params->sgid));
1060 memcpy(&qp_params->dgid.bytes[0],
1061 &attr->ah_attr.grh.dgid,
1062 sizeof(qp_params->dgid));
1063 qp_params->roce_mode = ROCE_V1;
1065 case RDMA_NETWORK_IPV4:
1066 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1067 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1068 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1069 qp_params->sgid.ipv4_addr = ipv4_addr;
1071 qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1072 qp_params->dgid.ipv4_addr = ipv4_addr;
1073 SET_FIELD(qp_params->modify_flags,
1074 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1075 qp_params->roce_mode = ROCE_V2_IPV4;
1080 for (i = 0; i < 4; i++) {
1081 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1082 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1085 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1086 qp_params->vlan_id = 0;
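/* Sanity-check the requested QP attributes against the device limits reported
 * by the qed core before any resources are allocated.
 */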
1091 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1092 struct ib_qp_init_attr *attrs)
1094 struct qedr_device_attr *qattr = &dev->attr;
1096 /* QP0... attrs->qp_type == IB_QPT_GSI */
1097 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1098 DP_DEBUG(dev, QEDR_MSG_QP,
1099 "create qp: unsupported qp type=0x%x requested\n",
1104 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1106 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1107 attrs->cap.max_send_wr, qattr->max_sqe);
1111 if (attrs->cap.max_inline_data > qattr->max_inline) {
1113 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1114 attrs->cap.max_inline_data, qattr->max_inline);
1118 if (attrs->cap.max_send_sge > qattr->max_sge) {
1120 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1121 attrs->cap.max_send_sge, qattr->max_sge);
1125 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1127 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1128 attrs->cap.max_recv_sge, qattr->max_sge);
1132 /* Unprivileged user space cannot create special QP */
1133 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1135 "create qp: userspace can't create special QPs of type=0x%x\n",
1143 static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1146 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1147 uresp->rq_icid = qp->icid;
1150 static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1153 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1154 uresp->sq_icid = qp->icid + 1;
1157 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1158 struct qedr_qp *qp, struct ib_udata *udata)
1160 struct qedr_create_qp_uresp uresp;
1163 memset(&uresp, 0, sizeof(uresp));
1164 qedr_copy_sq_uresp(&uresp, qp);
1165 qedr_copy_rq_uresp(&uresp, qp);
1167 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1168 uresp.qp_id = qp->qp_id;
1170 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1173 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1179 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1182 struct ib_qp_init_attr *attrs)
1184 spin_lock_init(&qp->q_lock);
1186 qp->qp_type = attrs->qp_type;
1187 qp->max_inline_data = attrs->cap.max_inline_data;
1188 qp->sq.max_sges = attrs->cap.max_send_sge;
1189 qp->state = QED_ROCE_QP_STATE_RESET;
1190 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1191 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1192 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1194 qp->rq.max_sges = attrs->cap.max_recv_sge;
1196 DP_DEBUG(dev, QEDR_MSG_QP,
1197 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1198 qp->rq.max_sges, qp->rq_cq->icid);
1199 DP_DEBUG(dev, QEDR_MSG_QP,
1200 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1201 pd->pd_id, qp->qp_type, qp->max_inline_data,
1202 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1203 DP_DEBUG(dev, QEDR_MSG_QP,
1204 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1205 qp->sq.max_sges, qp->sq_cq->icid);
1208 static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1210 qp->sq.db = dev->db_addr +
1211 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1212 qp->sq.db_data.data.icid = qp->icid + 1;
1213 qp->rq.db = dev->db_addr +
1214 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1215 qp->rq.db_data.data.icid = qp->icid;
1219 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1222 struct ib_qp_init_attr *attrs,
1223 bool fmr_and_reserved_lkey,
1224 struct qed_rdma_create_qp_in_params *params)
1226 /* QP handle to be written in an async event */
1227 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1228 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1230 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1231 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1232 params->pd = pd->pd_id;
1233 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1234 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1235 params->stats_queue = 0;
1236 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1238 params->use_srq = false;
1241 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1243 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1252 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1255 static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1258 ib_umem_release(qp->usq.umem);
1259 qp->usq.umem = NULL;
1262 ib_umem_release(qp->urq.umem);
1263 qp->urq.umem = NULL;
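/* User-space QP: the SQ and RQ buffers are allocated by the user library and
 * passed in through udata; the driver only pins them, builds their PBLs and
 * hands the physical layout to the qed core.
 */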
1266 static int qedr_create_user_qp(struct qedr_dev *dev,
1269 struct ib_udata *udata,
1270 struct ib_qp_init_attr *attrs)
1272 struct qed_rdma_create_qp_in_params in_params;
1273 struct qed_rdma_create_qp_out_params out_params;
1274 struct qedr_pd *pd = get_qedr_pd(ibpd);
1275 struct ib_ucontext *ib_ctx = NULL;
1276 struct qedr_ucontext *ctx = NULL;
1277 struct qedr_create_qp_ureq ureq;
1280 ib_ctx = ibpd->uobject->context;
1281 ctx = get_qedr_ucontext(ib_ctx);
1283 memset(&ureq, 0, sizeof(ureq));
1284 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1286 DP_ERR(dev, "Problem copying data from user space\n");
1290 /* SQ - read access only (0), dma sync not required (0) */
1291 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
1296 /* RQ - read access only (0), dma sync not required (0) */
1297 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
1303 memset(&in_params, 0, sizeof(in_params));
1304 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1305 in_params.qp_handle_lo = ureq.qp_handle_lo;
1306 in_params.qp_handle_hi = ureq.qp_handle_hi;
1307 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1308 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1309 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1310 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1312 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1313 &in_params, &out_params);
1320 qp->qp_id = out_params.qp_id;
1321 qp->icid = out_params.icid;
1323 rc = qedr_copy_qp_uresp(dev, qp, udata);
1327 qedr_qp_user_print(dev, qp);
1331 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1333 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1336 qedr_cleanup_user(dev, qp);
1341 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1343 struct qed_rdma_create_qp_in_params *in_params,
1344 u32 n_sq_elems, u32 n_rq_elems)
1346 struct qed_rdma_create_qp_out_params out_params;
1349 rc = dev->ops->common->chain_alloc(dev->cdev,
1350 QED_CHAIN_USE_TO_PRODUCE,
1352 QED_CHAIN_CNT_TYPE_U32,
1354 QEDR_SQE_ELEMENT_SIZE,
1360 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1361 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1363 rc = dev->ops->common->chain_alloc(dev->cdev,
1364 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1366 QED_CHAIN_CNT_TYPE_U32,
1368 QEDR_RQE_ELEMENT_SIZE,
1373 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1374 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1376 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1377 in_params, &out_params);
1382 qp->qp_id = out_params.qp_id;
1383 qp->icid = out_params.icid;
1385 qedr_set_roce_db_info(dev, qp);
1390 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
1392 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1393 kfree(qp->wqe_wr_id);
1395 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1396 kfree(qp->rqe_wr_id);
1399 static int qedr_create_kernel_qp(struct qedr_dev *dev,
1402 struct ib_qp_init_attr *attrs)
1404 struct qed_rdma_create_qp_in_params in_params;
1405 struct qedr_pd *pd = get_qedr_pd(ibpd);
1411 memset(&in_params, 0, sizeof(in_params));
1413 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1414 * the ring. The ring should allow at least a single WR, even if the
1415 * user requested none, due to allocation issues.
1416 * We should add an extra WR since the prod and cons indices of
1417 * wqe_wr_id are managed in such a way that the WQ is considered full
1418 * when (prod+1)%max_wr==cons. We currently don't do that because we
1419 * double the number of entries due to an iSER issue that pushes far more
1420 * WRs than indicated. If we decline its ib_post_send() then we get
1421 * error prints in dmesg that we'd like to avoid.
1423 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1426 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1428 if (!qp->wqe_wr_id) {
1429 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1433 /* QP handle to be written in CQE */
1434 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1435 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
1437 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1438 * the ring. The ring should allow at least a single WR, even if the
1439 * user requested none, due to allocation issues.
1441 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1443 /* Allocate driver internal RQ array */
1444 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1446 if (!qp->rqe_wr_id) {
1448 "create qp: failed RQ shadow memory allocation\n");
1449 kfree(qp->wqe_wr_id);
1453 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
1455 n_sq_entries = attrs->cap.max_send_wr;
1456 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1457 n_sq_entries = max_t(u32, n_sq_entries, 1);
1458 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1460 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1462 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1463 n_sq_elems, n_rq_elems);
1465 qedr_cleanup_kernel(dev, qp);
1470 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1471 struct ib_qp_init_attr *attrs,
1472 struct ib_udata *udata)
1474 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1475 struct qedr_pd *pd = get_qedr_pd(ibpd);
1480 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1481 udata ? "user library" : "kernel", pd);
1483 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1488 return ERR_PTR(-EINVAL);
1490 DP_DEBUG(dev, QEDR_MSG_QP,
1491 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1492 udata ? "user library" : "kernel", attrs->event_handler, pd,
1493 get_qedr_cq(attrs->send_cq),
1494 get_qedr_cq(attrs->send_cq)->icid,
1495 get_qedr_cq(attrs->recv_cq),
1496 get_qedr_cq(attrs->recv_cq)->icid);
1498 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1500 DP_ERR(dev, "create qp: failed allocating memory\n");
1501 return ERR_PTR(-ENOMEM);
1504 qedr_set_common_qp_params(dev, qp, pd, attrs);
1506 if (attrs->qp_type == IB_QPT_GSI) {
1507 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1514 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1516 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
1521 qp->ibqp.qp_num = qp->qp_id;
1528 return ERR_PTR(-EFAULT);
1531 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1534 case QED_ROCE_QP_STATE_RESET:
1535 return IB_QPS_RESET;
1536 case QED_ROCE_QP_STATE_INIT:
1538 case QED_ROCE_QP_STATE_RTR:
1540 case QED_ROCE_QP_STATE_RTS:
1542 case QED_ROCE_QP_STATE_SQD:
1544 case QED_ROCE_QP_STATE_ERR:
1546 case QED_ROCE_QP_STATE_SQE:
1552 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1553 enum ib_qp_state qp_state)
1557 return QED_ROCE_QP_STATE_RESET;
1559 return QED_ROCE_QP_STATE_INIT;
1561 return QED_ROCE_QP_STATE_RTR;
1563 return QED_ROCE_QP_STATE_RTS;
1565 return QED_ROCE_QP_STATE_SQD;
1567 return QED_ROCE_QP_STATE_ERR;
1569 return QED_ROCE_QP_STATE_ERR;
1573 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1575 qed_chain_reset(&qph->pbl);
1579 qph->db_data.data.value = cpu_to_le16(0);
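/* Driver-side QP state machine: validate the requested transition against the
 * current state and do any work it implies (e.g. ring the RQ doorbell on
 * INIT->RTR in case receives were posted before the move to RTR).
 */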
1582 static int qedr_update_qp_state(struct qedr_dev *dev,
1584 enum qed_roce_qp_state new_state)
1588 if (new_state == qp->state)
1591 switch (qp->state) {
1592 case QED_ROCE_QP_STATE_RESET:
1593 switch (new_state) {
1594 case QED_ROCE_QP_STATE_INIT:
1595 qp->prev_wqe_size = 0;
1596 qedr_reset_qp_hwq_info(&qp->sq);
1597 qedr_reset_qp_hwq_info(&qp->rq);
1604 case QED_ROCE_QP_STATE_INIT:
1605 switch (new_state) {
1606 case QED_ROCE_QP_STATE_RTR:
1607 /* Update doorbell (in case post_recv was
1608 * done before move to RTR)
1611 writel(qp->rq.db_data.raw, qp->rq.db);
1612 /* Make sure write takes effect */
1615 case QED_ROCE_QP_STATE_ERR:
1618 /* Invalid state change. */
1623 case QED_ROCE_QP_STATE_RTR:
1625 switch (new_state) {
1626 case QED_ROCE_QP_STATE_RTS:
1628 case QED_ROCE_QP_STATE_ERR:
1631 /* Invalid state change. */
1636 case QED_ROCE_QP_STATE_RTS:
1638 switch (new_state) {
1639 case QED_ROCE_QP_STATE_SQD:
1641 case QED_ROCE_QP_STATE_ERR:
1644 /* Invalid state change. */
1649 case QED_ROCE_QP_STATE_SQD:
1651 switch (new_state) {
1652 case QED_ROCE_QP_STATE_RTS:
1653 case QED_ROCE_QP_STATE_ERR:
1656 /* Invalid state change. */
1661 case QED_ROCE_QP_STATE_ERR:
1663 switch (new_state) {
1664 case QED_ROCE_QP_STATE_RESET:
1665 if ((qp->rq.prod != qp->rq.cons) ||
1666 (qp->sq.prod != qp->sq.cons)) {
1668 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1669 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1687 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1688 int attr_mask, struct ib_udata *udata)
1690 struct qedr_qp *qp = get_qedr_qp(ibqp);
1691 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1692 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1693 enum ib_qp_state old_qp_state, new_qp_state;
1696 DP_DEBUG(dev, QEDR_MSG_QP,
1697 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1700 old_qp_state = qedr_get_ibqp_state(qp->state);
1701 if (attr_mask & IB_QP_STATE)
1702 new_qp_state = attr->qp_state;
1704 new_qp_state = old_qp_state;
1706 if (!ib_modify_qp_is_ok
1707 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1708 IB_LINK_LAYER_ETHERNET)) {
1710 "modify qp: invalid attribute mask=0x%x specified for\n"
1711 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1712 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1718 /* Translate the masks... */
1719 if (attr_mask & IB_QP_STATE) {
1720 SET_FIELD(qp_params.modify_flags,
1721 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1722 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1725 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1726 qp_params.sqd_async = true;
1728 if (attr_mask & IB_QP_PKEY_INDEX) {
1729 SET_FIELD(qp_params.modify_flags,
1730 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1731 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1736 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1739 if (attr_mask & IB_QP_QKEY)
1740 qp->qkey = attr->qkey;
1742 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1743 SET_FIELD(qp_params.modify_flags,
1744 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1745 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1746 IB_ACCESS_REMOTE_READ;
1747 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1748 IB_ACCESS_REMOTE_WRITE;
1749 qp_params.incoming_atomic_en = attr->qp_access_flags &
1750 IB_ACCESS_REMOTE_ATOMIC;
1753 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1754 if (attr_mask & IB_QP_PATH_MTU) {
1755 if (attr->path_mtu < IB_MTU_256 ||
1756 attr->path_mtu > IB_MTU_4096) {
1757 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1761 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1762 ib_mtu_enum_to_int(iboe_get_mtu
1768 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1769 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1772 SET_FIELD(qp_params.modify_flags,
1773 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1775 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1776 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1777 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1779 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1781 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1784 "modify qp: problems with GID index %d (rc=%d)\n",
1785 attr->ah_attr.grh.sgid_index, rc);
1789 rc = qedr_get_dmac(dev, &attr->ah_attr,
1790 qp_params.remote_mac_addr);
1794 qp_params.use_local_mac = true;
1795 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1797 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1798 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1799 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1800 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1801 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1802 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1803 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1804 qp_params.remote_mac_addr);
1806 qp_params.mtu = qp->mtu;
1807 qp_params.lb_indication = false;
1810 if (!qp_params.mtu) {
1811 /* Stay with current MTU */
1813 qp_params.mtu = qp->mtu;
1816 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1819 if (attr_mask & IB_QP_TIMEOUT) {
1820 SET_FIELD(qp_params.modify_flags,
1821 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1823 qp_params.ack_timeout = attr->timeout;
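/* attr->timeout is the IB exponent: the actual timeout is
 * 4.096 usec * 2^timeout, converted to msec for the FW below.
 */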
1824 if (attr->timeout) {
1827 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1828 /* FW requires [msec] */
1829 qp_params.ack_timeout = temp;
1832 qp_params.ack_timeout = 0;
1835 if (attr_mask & IB_QP_RETRY_CNT) {
1836 SET_FIELD(qp_params.modify_flags,
1837 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1838 qp_params.retry_cnt = attr->retry_cnt;
1841 if (attr_mask & IB_QP_RNR_RETRY) {
1842 SET_FIELD(qp_params.modify_flags,
1843 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1844 qp_params.rnr_retry_cnt = attr->rnr_retry;
1847 if (attr_mask & IB_QP_RQ_PSN) {
1848 SET_FIELD(qp_params.modify_flags,
1849 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1850 qp_params.rq_psn = attr->rq_psn;
1851 qp->rq_psn = attr->rq_psn;
1854 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1855 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1858 "unsupported max_rd_atomic=%d, supported=%d\n",
1859 attr->max_rd_atomic,
1860 dev->attr.max_qp_req_rd_atomic_resc);
1864 SET_FIELD(qp_params.modify_flags,
1865 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1866 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1869 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1870 SET_FIELD(qp_params.modify_flags,
1871 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1872 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1875 if (attr_mask & IB_QP_SQ_PSN) {
1876 SET_FIELD(qp_params.modify_flags,
1877 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1878 qp_params.sq_psn = attr->sq_psn;
1879 qp->sq_psn = attr->sq_psn;
1882 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1883 if (attr->max_dest_rd_atomic >
1884 dev->attr.max_qp_resp_rd_atomic_resc) {
1886 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1887 attr->max_dest_rd_atomic,
1888 dev->attr.max_qp_resp_rd_atomic_resc);
1894 SET_FIELD(qp_params.modify_flags,
1895 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1896 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1899 if (attr_mask & IB_QP_DEST_QPN) {
1900 SET_FIELD(qp_params.modify_flags,
1901 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1903 qp_params.dest_qp = attr->dest_qp_num;
1904 qp->dest_qp_num = attr->dest_qp_num;
1907 if (qp->qp_type != IB_QPT_GSI)
1908 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1909 qp->qed_qp, &qp_params);
1911 if (attr_mask & IB_QP_STATE) {
1912 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1913 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
1914 qp->state = qp_params.new_state;
1921 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1923 int ib_qp_acc_flags = 0;
1925 if (params->incoming_rdma_write_en)
1926 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1927 if (params->incoming_rdma_read_en)
1928 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1929 if (params->incoming_atomic_en)
1930 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1931 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1932 return ib_qp_acc_flags;
1935 int qedr_query_qp(struct ib_qp *ibqp,
1936 struct ib_qp_attr *qp_attr,
1937 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1939 struct qed_rdma_query_qp_out_params params;
1940 struct qedr_qp *qp = get_qedr_qp(ibqp);
1941 struct qedr_dev *dev = qp->dev;
1944 memset(¶ms, 0, sizeof(params));
1946 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, ¶ms);
1950 memset(qp_attr, 0, sizeof(*qp_attr));
1951 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
1953 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
1954 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
1955 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
1956 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1957 qp_attr->rq_psn = params.rq_psn;
1958 qp_attr->sq_psn = params.sq_psn;
1959 qp_attr->dest_qp_num = params.dest_qp;
1961 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(¶ms);
1963 qp_attr->cap.max_send_wr = qp->sq.max_wr;
1964 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
1965 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1966 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1967 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
1968 qp_init_attr->cap = qp_attr->cap;
1970 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0],
1971 sizeof(qp_attr->ah_attr.grh.dgid.raw));
1973 qp_attr->ah_attr.grh.flow_label = params.flow_label;
1974 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1975 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
1976 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
1978 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1979 qp_attr->ah_attr.port_num = 1;
1980 qp_attr->ah_attr.sl = 0;
1981 qp_attr->timeout = params.timeout;
1982 qp_attr->rnr_retry = params.rnr_retry;
1983 qp_attr->retry_cnt = params.retry_cnt;
1984 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
1985 qp_attr->pkey_index = params.pkey_index;
1986 qp_attr->port_num = 1;
1987 qp_attr->ah_attr.src_path_bits = 0;
1988 qp_attr->ah_attr.static_rate = 0;
1989 qp_attr->alt_pkey_index = 0;
1990 qp_attr->alt_port_num = 0;
1991 qp_attr->alt_timeout = 0;
1992 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1994 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
1995 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
1996 qp_attr->max_rd_atomic = params.max_rd_atomic;
1997 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
1999 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2000 qp_attr->cap.max_inline_data);
2006 int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2010 if (qp->qp_type != IB_QPT_GSI) {
2011 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2016 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2017 qedr_cleanup_user(dev, qp);
2019 qedr_cleanup_kernel(dev, qp);
2024 int qedr_destroy_qp(struct ib_qp *ibqp)
2026 struct qedr_qp *qp = get_qedr_qp(ibqp);
2027 struct qedr_dev *dev = qp->dev;
2028 struct ib_qp_attr attr;
2032 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2035 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2036 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2037 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2039 attr.qp_state = IB_QPS_ERR;
2040 attr_mask |= IB_QP_STATE;
2042 /* Change the QP state to ERROR */
2043 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2046 if (qp->qp_type == IB_QPT_GSI)
2047 qedr_destroy_gsi_qp(dev);
2049 qedr_free_qp_resources(dev, qp);
2056 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2057 struct ib_udata *udata)
2061 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2063 return ERR_PTR(-ENOMEM);
2070 int qedr_destroy_ah(struct ib_ah *ibah)
2072 struct qedr_ah *ah = get_qedr_ah(ibah);
2078 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2080 struct qedr_pbl *pbl, *tmp;
2082 if (info->pbl_table)
2083 list_add_tail(&info->pbl_table->list_entry,
2084 &info->free_pbl_list);
2086 if (!list_empty(&info->inuse_pbl_list))
2087 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2089 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2090 list_del(&pbl->list_entry);
2091 qedr_free_pbl(dev, &info->pbl_info, pbl);
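/* Prepare the PBL bookkeeping of an MR: size the PBL, allocate the first
 * table and stash a spare one on the free list for later reuse.
 */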
2095 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2096 size_t page_list_len, bool two_layered)
2098 struct qedr_pbl *tmp;
2101 INIT_LIST_HEAD(&info->free_pbl_list);
2102 INIT_LIST_HEAD(&info->inuse_pbl_list);
2104 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2105 page_list_len, two_layered);
2109 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2110 if (IS_ERR(info->pbl_table)) {
2111 rc = PTR_ERR(info->pbl_table);
2115 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2116 &info->pbl_table->pa);
2118 /* In the usual case we use 2 PBLs, so we add one to the free
2119 * list and allocate another one
2121 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2123 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2127 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2129 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2133 free_mr_info(dev, info);
2138 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2139 u64 usr_addr, int acc, struct ib_udata *udata)
2141 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2146 pd = get_qedr_pd(ibpd);
2147 DP_DEBUG(dev, QEDR_MSG_MR,
2148 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2149 pd->pd_id, start, len, usr_addr, acc);
2151 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2152 return ERR_PTR(-EINVAL);
2154 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2158 mr->type = QEDR_MR_USER;
2160 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2161 if (IS_ERR(mr->umem)) {
2166 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2170 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2171 &mr->info.pbl_info);
2173 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2175 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2179 /* Index only, 18 bit long, lkey = itid << 8 | key */
2180 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2182 mr->hw_mr.pd = pd->pd_id;
2183 mr->hw_mr.local_read = 1;
2184 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2185 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2186 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2187 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2188 mr->hw_mr.mw_bind = false;
2189 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2190 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2191 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2192 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2193 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2194 mr->hw_mr.length = len;
2195 mr->hw_mr.vaddr = usr_addr;
2196 mr->hw_mr.zbva = false;
2197 mr->hw_mr.phy_mr = false;
2198 mr->hw_mr.dma_mr = false;
2200 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2202 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2206 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2207 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2208 mr->hw_mr.remote_atomic)
2209 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2211 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2216 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2218 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2224 int qedr_dereg_mr(struct ib_mr *ib_mr)
2226 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2227 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2230 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2234 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2236 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2237 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2239 /* it could be user registered memory. */
2241 ib_umem_release(mr->umem);
2248 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2249 int max_page_list_len)
2251 struct qedr_pd *pd = get_qedr_pd(ibpd);
2252 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2256 DP_DEBUG(dev, QEDR_MSG_MR,
2257 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2260 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2265 mr->type = QEDR_MR_FRMR;
2267 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2271 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2273 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2277 /* Index only, 18 bit long, lkey = itid << 8 | key */
2278 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2280 mr->hw_mr.pd = pd->pd_id;
2281 mr->hw_mr.local_read = 1;
2282 mr->hw_mr.local_write = 0;
2283 mr->hw_mr.remote_read = 0;
2284 mr->hw_mr.remote_write = 0;
2285 mr->hw_mr.remote_atomic = 0;
2286 mr->hw_mr.mw_bind = false;
2287 mr->hw_mr.pbl_ptr = 0;
2288 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2289 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2291 mr->hw_mr.length = 0;
2292 mr->hw_mr.vaddr = 0;
2293 mr->hw_mr.zbva = false;
2294 mr->hw_mr.phy_mr = true;
2295 mr->hw_mr.dma_mr = false;
2297 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2299 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2303 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2304 mr->ibmr.rkey = mr->ibmr.lkey;
2306 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2310 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2316 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2317 enum ib_mr_type mr_type, u32 max_num_sg)
2319 struct qedr_dev *dev;
2322 if (mr_type != IB_MR_TYPE_MEM_REG)
2323 return ERR_PTR(-EINVAL);
2325 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2328 return ERR_PTR(-EINVAL);
2335 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2337 struct qedr_mr *mr = get_qedr_mr(ibmr);
2338 struct qedr_pbl *pbl_table;
2339 struct regpair *pbe;
2342 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2343 DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
2347 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2350 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2351 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2352 pbe = (struct regpair *)pbl_table->va;
2353 pbe += mr->npages % pbes_in_page;
2354 pbe->lo = cpu_to_le32((u32)addr);
2355 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2362 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2364 int work = info->completed - info->completed_handled - 1;
2366 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2367 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2368 struct qedr_pbl *pbl;
2370 /* Free all the page lists that can be freed (all the ones that
2371 * were invalidated), under the assumption that if an FMR completed
2372 * successfully, then any invalidate operation posted before it has
2373 * also completed.
2375 pbl = list_first_entry(&info->inuse_pbl_list,
2376 struct qedr_pbl, list_entry);
2377 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
2378 info->completed_handled++;
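/* Recycle the PBLs of already completed fast-reg work, then let the IB core
 * map the scatterlist into this MR page by page through qedr_set_page().
 */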
2382 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2383 int sg_nents, unsigned int *sg_offset)
2385 struct qedr_mr *mr = get_qedr_mr(ibmr);
2389 handle_completed_mrs(mr->dev, &mr->info);
2390 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2393 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2395 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2396 struct qedr_pd *pd = get_qedr_pd(ibpd);
2400 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2402 return ERR_PTR(-ENOMEM);
2404 mr->type = QEDR_MR_DMA;
2406 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2408 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2412 /* index only, 18 bit long, lkey = itid << 8 | key */
2413 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2414 mr->hw_mr.pd = pd->pd_id;
2415 mr->hw_mr.local_read = 1;
2416 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2417 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2418 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2419 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2420 mr->hw_mr.dma_mr = true;
2422 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2424 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2428 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2429 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2430 mr->hw_mr.remote_atomic)
2431 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2433 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2437 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2443 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2445 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
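/*
 * Example of the fullness test above (illustrative values): with
 * max_wr == 256, prod == 255 and cons == 0,
 *
 *	((255 + 1) % 256) == 0 == cons	// ring reported full
 *
 * so one slot is always left unused, which is how a full ring is told
 * apart from an empty one (prod == cons).
 */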
2448 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2452 for (i = 0; i < num_sge; i++)
2453 len += sg_list[i].length;
2458 static void swap_wqe_data64(u64 *p)
2462 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2463 *p = cpu_to_be64(cpu_to_le64(*p));
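/*
 * Illustrative effect of swap_wqe_data64() on a little-endian host, where
 * cpu_to_le64() is a no-op and cpu_to_be64() byte-swaps: every 64-bit word
 * of the inlined payload is byte-reversed in place, e.g.
 *
 *	u64 w = 0x0102030405060708ULL;
 *	w = cpu_to_be64(cpu_to_le64(w));	// 0x0807060504030201
 *
 * The inline-data path below applies this per completed WQE segment.
 */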
2466 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2467 struct qedr_qp *qp, u8 *wqe_size,
2468 struct ib_send_wr *wr,
2469 struct ib_send_wr **bad_wr, u8 *bits,
2472 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2473 char *seg_prt, *wqe;
2476 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2477 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2491 /* Copy data inline */
2492 for (i = 0; i < wr->num_sge; i++) {
2493 u32 len = wr->sg_list[i].length;
2494 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2499 /* New segment required */
2501 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2503 seg_siz = sizeof(struct rdma_sq_common_wqe);
2507 /* Calculate currently allowed length */
2508 cur = min_t(u32, len, seg_siz);
2509 memcpy(seg_prt, src, cur);
2511 /* Update segment variables */
2515 /* Update sge variables */
2519 /* Swap fully-completed segments */
2521 swap_wqe_data64((u64 *)wqe);
2525 /* Swap the last, not fully completed segment */
2527 swap_wqe_data64((u64 *)wqe);
2532 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2534 DMA_REGPAIR_LE(sge->addr, vaddr); \
2535 (sge)->length = cpu_to_le32(vlength); \
2536 (sge)->flags = cpu_to_le32(vflags); \
2539 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2541 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2542 (hdr)->num_sges = num_sge; \
2545 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2547 DMA_REGPAIR_LE(sge->addr, vaddr); \
2548 (sge)->length = cpu_to_le32(vlength); \
2549 (sge)->l_key = cpu_to_le32(vlkey); \
2552 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2553 struct ib_send_wr *wr)
2558 for (i = 0; i < wr->num_sge; i++) {
2559 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2561 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2562 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2563 sge->length = cpu_to_le32(wr->sg_list[i].length);
2564 data_size += wr->sg_list[i].length;
2568 *wqe_size += wr->num_sge;
2573 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2575 struct rdma_sq_rdma_wqe_1st *rwqe,
2576 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2577 struct ib_send_wr *wr,
2578 struct ib_send_wr **bad_wr)
2580 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2581 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2583 if (wr->send_flags & IB_SEND_INLINE &&
2584 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2585 wr->opcode == IB_WR_RDMA_WRITE)) {
2588 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2589 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2590 bad_wr, &rwqe->flags, flags);
2593 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
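/*
 * Consumer-side sketch (assumed names and values, not driver code) of a WR
 * that takes the inline branch above: a small RDMA WRITE posted with
 * IB_SEND_INLINE, whose payload is copied into the WQE rather than being
 * described by SGEs.
 *
 *	struct ib_sge sge = {
 *		.addr	= (uintptr_t)buf,	// virtual address for inline
 *		.length	= 16,
 *		.lkey	= 0,			// ignored on the inline path
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr.opcode	= IB_WR_RDMA_WRITE,
 *		.wr.send_flags	= IB_SEND_INLINE | IB_SEND_SIGNALED,
 *		.wr.sg_list	= &sge,
 *		.wr.num_sge	= 1,
 *		.remote_addr	= remote_va,
 *		.rkey		= rkey,
 *	};
 *
 * Payloads larger than ROCE_REQ_MAX_INLINE_DATA_SIZE are rejected by
 * qedr_prepare_sq_inline_data().
 */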
2596 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2598 struct rdma_sq_send_wqe_1st *swqe,
2599 struct rdma_sq_send_wqe_2st *swqe2,
2600 struct ib_send_wr *wr,
2601 struct ib_send_wr **bad_wr)
2603 memset(swqe2, 0, sizeof(*swqe2));
2604 if (wr->send_flags & IB_SEND_INLINE) {
2607 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2608 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2609 bad_wr, &swqe->flags, flags);
2612 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2615 static int qedr_prepare_reg(struct qedr_qp *qp,
2616 struct rdma_sq_fmr_wqe_1st *fwqe1,
2617 struct ib_reg_wr *wr)
2619 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2620 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2622 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2623 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2624 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2625 fwqe1->l_key = wr->key;
2627 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2628 !!(wr->access & IB_ACCESS_REMOTE_READ));
2629 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2630 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2631 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2632 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2633 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2634 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2635 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2636 fwqe2->fmr_ctrl = 0;
2638 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2639 ilog2(mr->ibmr.page_size) - 12);
2641 fwqe2->length_hi = 0;
2642 fwqe2->length_lo = mr->ibmr.length;
2643 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2644 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2646 qp->wqe_wr_id[qp->sq.prod].mr = mr;
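/*
 * Illustrative encoding of the PAGE_SIZE_LOG field set above: the HW takes
 * the MR page size as a power of two relative to 4K, so
 *
 *	ilog2(SZ_4K) - 12 == 0	// 4K pages
 *	ilog2(SZ_2M) - 12 == 9	// 2M pages
 *
 * where mr->ibmr.page_size is whatever page size the consumer passed to
 * ib_map_mr_sg() when building the page list.
 */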
2651 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2654 case IB_WR_RDMA_WRITE:
2655 case IB_WR_RDMA_WRITE_WITH_IMM:
2656 return IB_WC_RDMA_WRITE;
2657 case IB_WR_SEND_WITH_IMM:
2659 case IB_WR_SEND_WITH_INV:
2661 case IB_WR_RDMA_READ:
2662 return IB_WC_RDMA_READ;
2663 case IB_WR_ATOMIC_CMP_AND_SWP:
2664 return IB_WC_COMP_SWAP;
2665 case IB_WR_ATOMIC_FETCH_AND_ADD:
2666 return IB_WC_FETCH_ADD;
2668 return IB_WC_REG_MR;
2669 case IB_WR_LOCAL_INV:
2670 return IB_WC_LOCAL_INV;
2676 static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2678 int wq_is_full, err_wr, pbl_is_full;
2679 struct qedr_dev *dev = qp->dev;
2681 /* prevent SQ overflow and/or processing of a bad WR */
2682 err_wr = wr->num_sge > qp->sq.max_sges;
2683 wq_is_full = qedr_wq_is_full(&qp->sq);
2684 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2685 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2686 if (wq_is_full || err_wr || pbl_is_full) {
2687 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2689 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2691 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2694 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2696 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2698 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2702 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2704 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2706 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2713 static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2714 struct ib_send_wr **bad_wr)
2716 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2717 struct qedr_qp *qp = get_qedr_qp(ibqp);
2718 struct rdma_sq_atomic_wqe_1st *awqe1;
2719 struct rdma_sq_atomic_wqe_2nd *awqe2;
2720 struct rdma_sq_atomic_wqe_3rd *awqe3;
2721 struct rdma_sq_send_wqe_2st *swqe2;
2722 struct rdma_sq_local_inv_wqe *iwqe;
2723 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2724 struct rdma_sq_send_wqe_1st *swqe;
2725 struct rdma_sq_rdma_wqe_1st *rwqe;
2726 struct rdma_sq_fmr_wqe_1st *fwqe1;
2727 struct rdma_sq_common_wqe *wqe;
2732 if (!qedr_can_post_send(qp, wr)) {
2737 wqe = qed_chain_produce(&qp->sq.pbl);
2738 qp->wqe_wr_id[qp->sq.prod].signaled =
2739 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2742 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2743 !!(wr->send_flags & IB_SEND_SOLICITED));
2744 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2745 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2746 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2747 !!(wr->send_flags & IB_SEND_FENCE));
2748 wqe->prev_wqe_size = qp->prev_wqe_size;
2750 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2752 switch (wr->opcode) {
2753 case IB_WR_SEND_WITH_IMM:
2754 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2755 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2757 swqe2 = qed_chain_produce(&qp->sq.pbl);
2759 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2760 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2762 swqe->length = cpu_to_le32(length);
2763 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2764 qp->prev_wqe_size = swqe->wqe_size;
2765 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2768 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2769 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2772 swqe2 = qed_chain_produce(&qp->sq.pbl);
2773 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2775 swqe->length = cpu_to_le32(length);
2776 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2777 qp->prev_wqe_size = swqe->wqe_size;
2778 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2780 case IB_WR_SEND_WITH_INV:
2781 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2782 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2783 swqe2 = qed_chain_produce(&qp->sq.pbl);
2785 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2786 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2788 swqe->length = cpu_to_le32(length);
2789 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2790 qp->prev_wqe_size = swqe->wqe_size;
2791 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2794 case IB_WR_RDMA_WRITE_WITH_IMM:
2795 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2796 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2799 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2800 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2801 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2803 rwqe->length = cpu_to_le32(length);
2804 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2805 qp->prev_wqe_size = rwqe->wqe_size;
2806 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2808 case IB_WR_RDMA_WRITE:
2809 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2810 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2813 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2814 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2816 rwqe->length = cpu_to_le32(length);
2817 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2818 qp->prev_wqe_size = rwqe->wqe_size;
2819 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2821 case IB_WR_RDMA_READ_WITH_INV:
2823 "RDMA READ WITH INVALIDATE not supported\n");
2828 case IB_WR_RDMA_READ:
2829 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2830 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2833 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2834 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2836 rwqe->length = cpu_to_le32(length);
2837 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2838 qp->prev_wqe_size = rwqe->wqe_size;
2839 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2842 case IB_WR_ATOMIC_CMP_AND_SWP:
2843 case IB_WR_ATOMIC_FETCH_AND_ADD:
2844 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2845 awqe1->wqe_size = 4;
2847 awqe2 = qed_chain_produce(&qp->sq.pbl);
2848 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2849 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2851 awqe3 = qed_chain_produce(&qp->sq.pbl);
2853 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2854 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2855 DMA_REGPAIR_LE(awqe3->swap_data,
2856 atomic_wr(wr)->compare_add);
2858 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2859 DMA_REGPAIR_LE(awqe3->swap_data,
2860 atomic_wr(wr)->swap);
2861 DMA_REGPAIR_LE(awqe3->cmp_data,
2862 atomic_wr(wr)->compare_add);
2865 qedr_prepare_sq_sges(qp, NULL, wr);
2867 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2868 qp->prev_wqe_size = awqe1->wqe_size;
2871 case IB_WR_LOCAL_INV:
2872 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2875 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2876 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2877 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2878 qp->prev_wqe_size = iwqe->wqe_size;
2881 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2882 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2883 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2884 fwqe1->wqe_size = 2;
2886 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2888 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2893 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2894 qp->prev_wqe_size = fwqe1->wqe_size;
2897 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2906 /* Restore prod to its position before
2907 * this WR was processed
2909 value = le16_to_cpu(qp->sq.db_data.data.value);
2910 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2912 /* Restore prev_wqe_size */
2913 qp->prev_wqe_size = wqe->prev_wqe_size;
2915 DP_ERR(dev, "POST SEND FAILED\n");
2921 int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2922 struct ib_send_wr **bad_wr)
2924 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2925 struct qedr_qp *qp = get_qedr_qp(ibqp);
2926 unsigned long flags;
2931 if (qp->qp_type == IB_QPT_GSI)
2932 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2934 spin_lock_irqsave(&qp->q_lock, flags);
2936 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
2937 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2938 (qp->state != QED_ROCE_QP_STATE_SQD)) {
2939 spin_unlock_irqrestore(&qp->q_lock, flags);
2941 DP_DEBUG(dev, QEDR_MSG_CQ,
2942 "QP in wrong state! QP icid=0x%x state %d\n",
2943 qp->icid, qp->state);
2948 rc = __qedr_post_send(ibqp, wr, bad_wr);
2952 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
2954 qedr_inc_sw_prod(&qp->sq);
2956 qp->sq.db_data.data.value++;
2962 * If there was a failure in the first WR then it will be triggered in
2963 * vain. However, this is not harmful (as long as the producer value is
2964 * unchanged). For performance reasons we avoid checking for this
2965 * redundant doorbell.
2968 writel(qp->sq.db_data.raw, qp->sq.db);
2970 /* Make sure write sticks */
2973 spin_unlock_irqrestore(&qp->q_lock, flags);
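/*
 * Consumer-side sketch (assumed names, illustrative only) of the *bad_wr
 * contract used above: WRs may be chained, and on failure bad_wr points at
 * the first WR that was not accepted, while the single doorbell write
 * still covers everything that was posted before it.
 *
 *	struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(qp, &wr, &bad_wr);
 *	if (rc)
 *		pr_err("post_send failed at wr_id %llu, rc %d\n",
 *		       (unsigned long long)bad_wr->wr_id, rc);
 */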
2978 int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2979 struct ib_recv_wr **bad_wr)
2981 struct qedr_qp *qp = get_qedr_qp(ibqp);
2982 struct qedr_dev *dev = qp->dev;
2983 unsigned long flags;
2986 if (qp->qp_type == IB_QPT_GSI)
2987 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
2989 spin_lock_irqsave(&qp->q_lock, flags);
2991 if (qp->state == QED_ROCE_QP_STATE_RESET) {
2992 spin_unlock_irqrestore(&qp->q_lock, flags);
3000 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3001 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3002 wr->num_sge > qp->rq.max_sges) {
3003 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3004 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3005 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3011 for (i = 0; i < wr->num_sge; i++) {
3013 struct rdma_rq_sge *rqe =
3014 qed_chain_produce(&qp->rq.pbl);
3016 /* The first one must include the number
3017 * of SGEs in the list
3020 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3023 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3024 wr->sg_list[i].lkey);
3026 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3027 wr->sg_list[i].length, flags);
3030 /* Special case of no SGEs. The FW requires between 1 and 4 SGEs,
3031 * so in this case we need to post one SGE with length zero. This is
3032 * because an RDMA WRITE with immediate consumes an RQE.
3036 struct rdma_rq_sge *rqe =
3037 qed_chain_produce(&qp->rq.pbl);
3039 /* The first one must include the number
3040 * of SGEs in the list
3042 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3043 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3045 RQ_SGE_SET(rqe, 0, 0, flags);
3049 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3050 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3052 qedr_inc_sw_prod(&qp->rq);
3054 /* Flush all the writes before signalling doorbell */
3057 qp->rq.db_data.data.value++;
3059 writel(qp->rq.db_data.raw, qp->rq.db);
3061 /* Make sure write sticks */
3067 spin_unlock_irqrestore(&qp->q_lock, flags);
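/*
 * Illustrative note on the zero-SGE case handled above: a receive WR
 * posted with num_sge == 0 (e.g. one that only catches an RDMA WRITE with
 * immediate) still consumes one RQE, which the driver fills with a single
 * dummy SGE:
 *
 *	RQ_SGE_SET(rqe, 0, 0, flags);	// addr 0, length 0, NUM_SGES == 1
 *
 * so the FW always sees between one and four SGEs per RQE.
 */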
3072 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3074 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3076 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3080 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3082 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3085 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3086 resp_cqe->qp_handle.lo,
3091 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3093 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3095 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
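/*
 * Illustrative note (assumed ring behaviour) on the is_valid_cqe() check
 * above: the FW flips the TOGGLE bit in the CQE flags each time it wraps
 * the CQ ring, so a CQE is "new" only while its toggle bit matches the
 * value the driver currently expects.  For a 4-entry ring:
 *
 *	pass 0 writes entries 0..3 with one toggle value
 *	pass 1 overwrites them with the opposite value
 *
 * which distinguishes fresh completions from stale ones without a
 * separate producer index.
 */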
3098 /* Return latest CQE (needs processing) */
3099 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3101 return cq->latest_cqe;
3104 /* For FMRs we need to increase the completed counter used by the FMR
3105 * algorithm that determines whether a PBL can be freed or not.
3106 * This must be done whether the work request was signaled or not. For
3107 * this purpose we call this function from the condition that checks if a WR
3108 * should be skipped, to make sure we don't miss it (possibly this FMR
3109 * operation was not signaled).
3111 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3113 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3114 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3117 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3118 struct qedr_cq *cq, int num_entries,
3119 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3124 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3125 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3126 qedr_chk_if_fmr(qp);
3132 wc->status = status;
3135 wc->src_qp = qp->id;
3138 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3139 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3141 switch (wc->opcode) {
3142 case IB_WC_RDMA_WRITE:
3143 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3145 case IB_WC_COMP_SWAP:
3146 case IB_WC_FETCH_ADD:
3150 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3160 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3161 qed_chain_consume(&qp->sq.pbl);
3162 qedr_inc_sw_cons(&qp->sq);
3168 static int qedr_poll_cq_req(struct qedr_dev *dev,
3169 struct qedr_qp *qp, struct qedr_cq *cq,
3170 int num_entries, struct ib_wc *wc,
3171 struct rdma_cqe_requester *req)
3175 switch (req->status) {
3176 case RDMA_CQE_REQ_STS_OK:
3177 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3180 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3181 if (qp->state != QED_ROCE_QP_STATE_ERR)
3183 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3184 cq->icid, qp->icid);
3185 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3186 IB_WC_WR_FLUSH_ERR, 1);
3189 /* Process all WQEs before the consumer */
3190 qp->state = QED_ROCE_QP_STATE_ERR;
3191 cnt = process_req(dev, qp, cq, num_entries, wc,
3192 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3194 /* If we have an extra WC, fill it with the actual error info */
3195 if (cnt < num_entries) {
3196 enum ib_wc_status wc_status;
3198 switch (req->status) {
3199 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3201 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3202 cq->icid, qp->icid);
3203 wc_status = IB_WC_BAD_RESP_ERR;
3205 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3207 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3208 cq->icid, qp->icid);
3209 wc_status = IB_WC_LOC_LEN_ERR;
3211 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3213 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3214 cq->icid, qp->icid);
3215 wc_status = IB_WC_LOC_QP_OP_ERR;
3217 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3219 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3220 cq->icid, qp->icid);
3221 wc_status = IB_WC_LOC_PROT_ERR;
3223 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3225 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3226 cq->icid, qp->icid);
3227 wc_status = IB_WC_MW_BIND_ERR;
3229 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3231 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3232 cq->icid, qp->icid);
3233 wc_status = IB_WC_REM_INV_REQ_ERR;
3235 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3237 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3238 cq->icid, qp->icid);
3239 wc_status = IB_WC_REM_ACCESS_ERR;
3241 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3243 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3244 cq->icid, qp->icid);
3245 wc_status = IB_WC_REM_OP_ERR;
3247 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3249 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3250 cq->icid, qp->icid);
3251 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3253 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3255 "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3256 cq->icid, qp->icid);
3257 wc_status = IB_WC_RETRY_EXC_ERR;
3261 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3262 cq->icid, qp->icid);
3263 wc_status = IB_WC_GENERAL_ERR;
3265 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3273 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3274 struct qedr_cq *cq, struct ib_wc *wc,
3275 struct rdma_cqe_responder *resp, u64 wr_id)
3277 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3280 wc->opcode = IB_WC_RECV;
3283 switch (resp->status) {
3284 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3285 wc_status = IB_WC_LOC_ACCESS_ERR;
3287 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3288 wc_status = IB_WC_LOC_LEN_ERR;
3290 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3291 wc_status = IB_WC_LOC_QP_OP_ERR;
3293 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3294 wc_status = IB_WC_LOC_PROT_ERR;
3296 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3297 wc_status = IB_WC_MW_BIND_ERR;
3299 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3300 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3302 case RDMA_CQE_RESP_STS_OK:
3303 wc_status = IB_WC_SUCCESS;
3304 wc->byte_len = le32_to_cpu(resp->length);
3306 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3308 if (flags == QEDR_RESP_RDMA_IMM)
3309 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3311 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3313 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3314 wc->wc_flags |= IB_WC_WITH_IMM;
3318 wc->status = IB_WC_GENERAL_ERR;
3319 DP_ERR(dev, "Invalid CQE status detected\n");
3323 wc->status = wc_status;
3325 wc->src_qp = qp->id;
3330 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3331 struct qedr_cq *cq, struct ib_wc *wc,
3332 struct rdma_cqe_responder *resp)
3334 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3336 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3338 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3339 qed_chain_consume(&qp->rq.pbl);
3340 qedr_inc_sw_cons(&qp->rq);
3345 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3346 int num_entries, struct ib_wc *wc, u16 hw_cons)
3350 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3352 wc->status = IB_WC_WR_FLUSH_ERR;
3355 wc->src_qp = qp->id;
3357 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3362 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3363 qed_chain_consume(&qp->rq.pbl);
3364 qedr_inc_sw_cons(&qp->rq);
3370 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3371 struct rdma_cqe_responder *resp, int *update)
3373 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3379 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3380 struct qedr_cq *cq, int num_entries,
3381 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3386 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3387 cnt = process_resp_flush(qp, cq, num_entries, wc,
3389 try_consume_resp_cqe(cq, qp, resp, update);
3391 cnt = process_resp_one(dev, qp, cq, wc, resp);
3399 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3400 struct rdma_cqe_requester *req, int *update)
3402 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3408 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3410 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3411 struct qedr_cq *cq = get_qedr_cq(ibcq);
3412 union rdma_cqe *cqe = cq->latest_cqe;
3413 u32 old_cons, new_cons;
3414 unsigned long flags;
3418 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3419 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3421 spin_lock_irqsave(&cq->cq_lock, flags);
3422 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3423 while (num_entries && is_valid_cqe(cq, cqe)) {
3427 /* prevent speculative reads of any field of CQE */
3430 qp = cqe_get_qp(cqe);
3432 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3438 switch (cqe_get_type(cqe)) {
3439 case RDMA_CQE_TYPE_REQUESTER:
3440 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3442 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3444 case RDMA_CQE_TYPE_RESPONDER_RQ:
3445 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3446 &cqe->resp, &update);
3448 case RDMA_CQE_TYPE_INVALID:
3450 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3459 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3461 cq->cq_cons += new_cons - old_cons;
3464 /* The doorbell notifies about the latest VALID entry,
3465 * but the chain already points to the next INVALID one.
3467 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3469 spin_unlock_irqrestore(&cq->cq_lock, flags);
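/*
 * Worked example (illustrative values) of the consumer update above,
 * assuming cq->cq_cons also stood at 10 before the loop: if old_cons was
 * 10 and three CQEs were consumed,
 *
 *	new_cons = 13;
 *	cq->cq_cons += 13 - 10;		// now 13
 *	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
 *
 * i.e. the doorbell reports index 12, the last VALID entry processed,
 * while the chain itself already points at the next, still-invalid CQE.
 */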
3473 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3475 const struct ib_wc *in_wc,
3476 const struct ib_grh *in_grh,
3477 const struct ib_mad_hdr *mad_hdr,
3478 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3479 size_t *out_mad_size, u16 *out_mad_pkey_index)
3481 struct qedr_dev *dev = get_qedr_dev(ibdev);
3483 DP_DEBUG(dev, QEDR_MSG_GSI,
3484 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3485 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3486 mad_hdr->class_specific, mad_hdr->class_version,
3487 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3488 return IB_MAD_RESULT_SUCCESS;
3491 int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3492 struct ib_port_immutable *immutable)
3494 struct ib_port_attr attr;
3497 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3498 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3500 err = ib_query_port(ibdev, port_num, &attr);
3504 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3505 immutable->gid_tbl_len = attr.gid_tbl_len;
3506 immutable->max_mad_size = IB_MGMT_MAD_SIZE;