1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
46 #include <linux/qed/common_hsi.h>
47 #include "qedr_hsi_rdma.h"
48 #include <linux/qed/qed_if.h>
51 #include <rdma/qedr-abi.h>
54 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
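/* RoCE exposes a single default P_Key, so querying the table just
 * validates the index and returns the default value.
 */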
56 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
58 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
61 *pkey = QEDR_ROCE_PKEY_DEFAULT;
65 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
68 struct qedr_dev *dev = get_qedr_dev(ibdev);
71 if (!rdma_cap_roce_gid_table(ibdev, port))
74 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
76 memcpy(sgid, &zgid, sizeof(*sgid));
80 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
81 sgid->global.interface_id, sgid->global.subnet_prefix);
86 int qedr_add_gid(struct ib_device *device, u8 port_num,
87 unsigned int index, const union ib_gid *gid,
88 const struct ib_gid_attr *attr, void **context)
90 if (!rdma_cap_roce_gid_table(device, port_num))
93 if (port_num > QEDR_MAX_PORT)
102 int qedr_del_gid(struct ib_device *device, u8 port_num,
103 unsigned int index, void **context)
105 if (!rdma_cap_roce_gid_table(device, port_num))
108 if (port_num > QEDR_MAX_PORT)
117 int qedr_query_device(struct ib_device *ibdev,
118 struct ib_device_attr *attr, struct ib_udata *udata)
120 struct qedr_dev *dev = get_qedr_dev(ibdev);
121 struct qedr_device_attr *qattr = &dev->attr;
123 if (!dev->rdma_ctx) {
125 "qedr_query_device called with invalid params rdma_ctx=%p\n",
130 memset(attr, 0, sizeof(*attr));
132 attr->fw_ver = qattr->fw_ver;
133 attr->sys_image_guid = qattr->sys_image_guid;
134 attr->max_mr_size = qattr->max_mr_size;
135 attr->page_size_cap = qattr->page_size_caps;
136 attr->vendor_id = qattr->vendor_id;
137 attr->vendor_part_id = qattr->vendor_part_id;
138 attr->hw_ver = qattr->hw_ver;
139 attr->max_qp = qattr->max_qp;
140 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
141 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
142 IB_DEVICE_RC_RNR_NAK_GEN |
143 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
145 attr->max_sge = qattr->max_sge;
146 attr->max_sge_rd = qattr->max_sge;
147 attr->max_cq = qattr->max_cq;
148 attr->max_cqe = qattr->max_cqe;
149 attr->max_mr = qattr->max_mr;
150 attr->max_mw = qattr->max_mw;
151 attr->max_pd = qattr->max_pd;
152 attr->atomic_cap = dev->atomic_cap;
153 attr->max_fmr = qattr->max_fmr;
154 attr->max_map_per_fmr = 16;
155 attr->max_qp_init_rd_atom =
156 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
157 attr->max_qp_rd_atom =
158 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
159 attr->max_qp_init_rd_atom);
161 attr->max_srq = qattr->max_srq;
162 attr->max_srq_sge = qattr->max_srq_sge;
163 attr->max_srq_wr = qattr->max_srq_wr;
165 attr->local_ca_ack_delay = qattr->dev_ack_delay;
166 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
167 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
168 attr->max_ah = qattr->max_ah;
173 #define QEDR_SPEED_SDR (1)
174 #define QEDR_SPEED_DDR (2)
175 #define QEDR_SPEED_QDR (4)
176 #define QEDR_SPEED_FDR10 (8)
177 #define QEDR_SPEED_FDR (16)
178 #define QEDR_SPEED_EDR (32)
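/* Map the link speed reported by qed to the nearest IB (speed, width)
 * pair; unrecognized speeds fall back to SDR x1 (the elided case labels
 * select among the combinations assigned below).
 */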
180 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
185 *ib_speed = QEDR_SPEED_SDR;
186 *ib_width = IB_WIDTH_1X;
189 *ib_speed = QEDR_SPEED_QDR;
190 *ib_width = IB_WIDTH_1X;
194 *ib_speed = QEDR_SPEED_DDR;
195 *ib_width = IB_WIDTH_4X;
199 *ib_speed = QEDR_SPEED_EDR;
200 *ib_width = IB_WIDTH_1X;
204 *ib_speed = QEDR_SPEED_QDR;
205 *ib_width = IB_WIDTH_4X;
209 *ib_speed = QEDR_SPEED_QDR;
210 *ib_width = IB_WIDTH_4X;
214 *ib_speed = QEDR_SPEED_EDR;
215 *ib_width = IB_WIDTH_4X;
220 *ib_speed = QEDR_SPEED_SDR;
221 *ib_width = IB_WIDTH_1X;
225 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
227 struct qedr_dev *dev;
228 struct qed_rdma_port *rdma_port;
230 dev = get_qedr_dev(ibdev);
232 DP_ERR(dev, "invalid_port=0x%x\n", port);
236 if (!dev->rdma_ctx) {
237 DP_ERR(dev, "rdma_ctx is NULL\n");
241 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
243 /* *attr being zeroed by the caller, avoid zeroing it here */
244 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
245 attr->state = IB_PORT_ACTIVE;
246 attr->phys_state = 5;
248 attr->state = IB_PORT_DOWN;
249 attr->phys_state = 3;
251 attr->max_mtu = IB_MTU_4096;
252 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
257 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
258 attr->gid_tbl_len = QEDR_MAX_SGID;
259 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
260 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
261 attr->qkey_viol_cntr = 0;
262 get_link_speed_and_width(rdma_port->link_speed,
263 &attr->active_speed, &attr->active_width);
264 attr->max_msg_sz = rdma_port->max_msg_size;
265 attr->max_vl_num = 4;
270 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
271 struct ib_port_modify *props)
273 struct qedr_dev *dev;
275 dev = get_qedr_dev(ibdev);
277 DP_ERR(dev, "invalid_port=0x%x\n", port);
284 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
289 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
293 mm->key.phy_addr = phy_addr;
294 /* This function might be called with a length which is not a multiple
295 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
296 * forces this granularity by increasing the requested size if needed.
297 * When qedr_mmap is called, it will search the list with the updated
298 * length as a key. To prevent search failures, the length is rounded up
299 * in advance to PAGE_SIZE.
301 mm->key.len = roundup(len, PAGE_SIZE);
302 INIT_LIST_HEAD(&mm->entry);
304 mutex_lock(&uctx->mm_list_lock);
305 list_add(&mm->entry, &uctx->mm_head);
306 mutex_unlock(&uctx->mm_list_lock);
308 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
309 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
310 (unsigned long long)mm->key.phy_addr,
311 (unsigned long)mm->key.len, uctx);
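/* Look up a (phy_addr, len) pair previously registered via
 * qedr_add_mmap(); qedr_mmap() refuses VMAs that were never added.
 */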
316 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
322 mutex_lock(&uctx->mm_list_lock);
323 list_for_each_entry(mm, &uctx->mm_head, entry) {
324 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
330 mutex_unlock(&uctx->mm_list_lock);
331 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
332 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
333 phy_addr, len, uctx, found);
338 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
339 struct ib_udata *udata)
342 struct qedr_ucontext *ctx;
343 struct qedr_alloc_ucontext_resp uresp;
344 struct qedr_dev *dev = get_qedr_dev(ibdev);
345 struct qed_rdma_add_user_out_params oparams;
348 return ERR_PTR(-EFAULT);
350 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
352 return ERR_PTR(-ENOMEM);
354 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
357 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
362 ctx->dpi = oparams.dpi;
363 ctx->dpi_addr = oparams.dpi_addr;
364 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
365 ctx->dpi_size = oparams.dpi_size;
366 INIT_LIST_HEAD(&ctx->mm_head);
367 mutex_init(&ctx->mm_list_lock);
369 memset(&uresp, 0, sizeof(uresp));
371 uresp.db_pa = ctx->dpi_phys_addr;
372 uresp.db_size = ctx->dpi_size;
373 uresp.max_send_wr = dev->attr.max_sqe;
374 uresp.max_recv_wr = dev->attr.max_rqe;
375 uresp.max_srq_wr = dev->attr.max_srq_wr;
376 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
377 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
378 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
379 uresp.max_cqes = QEDR_MAX_CQES;
381 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
387 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
391 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
393 return &ctx->ibucontext;
400 int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
402 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
403 struct qedr_mm *mm, *tmp;
406 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
408 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
410 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
411 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
412 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
413 mm->key.phy_addr, mm->key.len, uctx);
414 list_del(&mm->entry);
422 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
424 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
425 struct qedr_dev *dev = get_qedr_dev(context->device);
426 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
427 u64 unmapped_db = dev->db_phys_addr;
428 unsigned long len = (vma->vm_end - vma->vm_start);
432 DP_DEBUG(dev, QEDR_MSG_INIT,
433 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
434 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
435 if (vma->vm_start & (PAGE_SIZE - 1)) {
436 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
441 found = qedr_search_mmap(ucontext, vm_page, len);
443 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
450 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
452 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
453 if (vma->vm_flags & VM_READ) {
454 DP_ERR(dev, "Trying to map doorbell bar for read\n");
458 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
460 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
461 PAGE_SIZE, vma->vm_page_prot);
463 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
464 rc = remap_pfn_range(vma, vma->vm_start,
465 vma->vm_pgoff, len, vma->vm_page_prot);
467 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
471 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
472 struct ib_ucontext *context, struct ib_udata *udata)
474 struct qedr_dev *dev = get_qedr_dev(ibdev);
479 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
480 (udata && context) ? "User Lib" : "Kernel");
482 if (!dev->rdma_ctx) {
483 DP_ERR(dev, "invlaid RDMA context\n");
484 return ERR_PTR(-EINVAL);
487 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
489 return ERR_PTR(-ENOMEM);
491 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
497 if (udata && context) {
498 struct qedr_alloc_pd_uresp uresp;
502 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
504 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
505 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
509 pd->uctx = get_qedr_ucontext(context);
520 int qedr_dealloc_pd(struct ib_pd *ibpd)
522 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
523 struct qedr_pd *pd = get_qedr_pd(ibpd);
526 pr_err("Invalid PD received in dealloc_pd\n");
530 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
531 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
538 static void qedr_free_pbl(struct qedr_dev *dev,
539 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
541 struct pci_dev *pdev = dev->pdev;
544 for (i = 0; i < pbl_info->num_pbls; i++) {
547 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
548 pbl[i].va, pbl[i].pa);
554 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
555 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
557 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
558 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
559 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
561 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
562 struct qedr_pbl_info *pbl_info,
565 struct pci_dev *pdev = dev->pdev;
566 struct qedr_pbl *pbl_table;
567 dma_addr_t *pbl_main_tbl;
572 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
574 return ERR_PTR(-ENOMEM);
576 for (i = 0; i < pbl_info->num_pbls; i++) {
577 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
582 memset(va, 0, pbl_info->pbl_size);
583 pbl_table[i].va = va;
584 pbl_table[i].pa = pa;
587 /* Two-layer PBLs: if we have more than one PBL, we need to initialize
588 * the first one with physical pointers to all of the rest
590 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
591 for (i = 0; i < pbl_info->num_pbls - 1; i++)
592 pbl_main_tbl[i] = pbl_table[i + 1].pa;
597 for (i--; i >= 0; i--)
598 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
599 pbl_table[i].va, pbl_table[i].pa);
601 qedr_free_pbl(dev, pbl_info, pbl_table);
603 return ERR_PTR(-ENOMEM);
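/* Size the PBL table: a single PBL page holds pbl_size / sizeof(u64)
 * PBEs. If num_pbes does not fit in one page and the caller allows it,
 * build a two-layer table whose layer-0 page points at the data PBLs;
 * otherwise use a single PBL page rounded up to a power of two.
 */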
606 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
607 struct qedr_pbl_info *pbl_info,
608 u32 num_pbes, int two_layer_capable)
614 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
615 if (num_pbes > MAX_PBES_TWO_LAYER) {
616 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
621 /* calculate required pbl page size */
622 pbl_size = MIN_FW_PBL_PAGE_SIZE;
623 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
624 NUM_PBES_ON_PAGE(pbl_size);
626 while (pbl_capacity < num_pbes) {
628 pbl_capacity = pbl_size / sizeof(u64);
629 pbl_capacity = pbl_capacity * pbl_capacity;
632 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
633 num_pbls++; /* One for layer 0 (points to the PBLs) */
634 pbl_info->two_layered = true;
636 /* One layered PBL */
638 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
639 roundup_pow_of_two((num_pbes * sizeof(u64))));
640 pbl_info->two_layered = false;
643 pbl_info->num_pbls = num_pbls;
644 pbl_info->pbl_size = pbl_size;
645 pbl_info->num_pbes = num_pbes;
647 DP_DEBUG(dev, QEDR_MSG_MR,
648 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
649 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
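/* Walk the umem scatterlist and write one little-endian PBE per page
 * into the PBL pages, advancing to the next PBL page whenever the
 * current one fills up.
 */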
654 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
655 struct qedr_pbl *pbl,
656 struct qedr_pbl_info *pbl_info)
658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
659 struct qedr_pbl *pbl_tbl;
660 struct scatterlist *sg;
665 if (!pbl_info->num_pbes)
668 /* If we have a two-layered PBL, the first PBL points to the rest
669 * of the PBLs and the first entry lies in the second PBL of the table
671 if (pbl_info->two_layered)
676 pbe = (struct regpair *)pbl_tbl->va;
678 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
684 shift = umem->page_shift;
686 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
687 pages = sg_dma_len(sg) >> shift;
688 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
689 /* store the page address in pbe */
690 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
692 addr = upper_32_bits(sg_dma_address(sg) +
694 pbe->hi = cpu_to_le32(addr);
699 if (total_num_pbes == pbl_info->num_pbes)
702 /* If the given pbl is full storing the pbes,
705 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
707 pbe = (struct regpair *)pbl_tbl->va;
714 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
715 struct qedr_cq *cq, struct ib_udata *udata)
717 struct qedr_create_cq_uresp uresp;
720 memset(&uresp, 0, sizeof(uresp));
722 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
723 uresp.icid = cq->icid;
725 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
727 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
732 static void consume_cqe(struct qedr_cq *cq)
734 if (cq->latest_cqe == cq->toggle_cqe)
735 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
737 cq->latest_cqe = qed_chain_consume(&cq->pbl);
740 static inline int qedr_align_cq_entries(int entries)
742 u64 size, aligned_size;
744 /* We allocate an extra entry that we don't report to the FW. */
745 size = (entries + 1) * QEDR_CQE_SIZE;
746 aligned_size = ALIGN(size, PAGE_SIZE);
748 return aligned_size / QEDR_CQE_SIZE;
751 static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
752 struct qedr_dev *dev,
753 struct qedr_userq *q,
754 u64 buf_addr, size_t buf_len,
755 int access, int dmasync)
760 q->buf_addr = buf_addr;
761 q->buf_len = buf_len;
762 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
763 if (IS_ERR(q->umem)) {
764 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
766 return PTR_ERR(q->umem);
769 page_cnt = ib_umem_page_count(q->umem);
770 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
774 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
775 if (IS_ERR(q->pbl_tbl)) {
776 rc = PTR_ERR(q->pbl_tbl);
780 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
785 ib_umem_release(q->umem);
790 static inline void qedr_init_cq_params(struct qedr_cq *cq,
791 struct qedr_ucontext *ctx,
792 struct qedr_dev *dev, int vector,
793 int chain_entries, int page_cnt,
795 struct qed_rdma_create_cq_in_params
798 memset(params, 0, sizeof(*params));
799 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
800 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
801 params->cnq_id = vector;
802 params->cq_size = chain_entries - 1;
803 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
804 params->pbl_num_pages = page_cnt;
805 params->pbl_ptr = pbl_ptr;
806 params->pbl_two_level = 0;
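/* Write the aggregated doorbell (consumer index + arm flags) to the
 * CQ's doorbell address in a single 64-bit store.
 */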
809 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
811 /* Flush data before signalling doorbell */
813 cq->db.data.agg_flags = flags;
814 cq->db.data.value = cpu_to_le32(cons);
815 writeq(cq->db.raw, cq->db_addr);
817 /* Make sure write would stick */
821 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
823 struct qedr_cq *cq = get_qedr_cq(ibcq);
824 unsigned long sflags;
825 struct qedr_dev *dev;
827 dev = get_qedr_dev(ibcq->device);
831 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
837 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
840 spin_lock_irqsave(&cq->cq_lock, sflags);
844 if (flags & IB_CQ_SOLICITED)
845 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
847 if (flags & IB_CQ_NEXT_COMP)
848 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
850 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
852 spin_unlock_irqrestore(&cq->cq_lock, sflags);
857 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
858 const struct ib_cq_init_attr *attr,
859 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
861 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
862 struct qed_rdma_destroy_cq_out_params destroy_oparams;
863 struct qed_rdma_destroy_cq_in_params destroy_iparams;
864 struct qedr_dev *dev = get_qedr_dev(ibdev);
865 struct qed_rdma_create_cq_in_params params;
866 struct qedr_create_cq_ureq ureq;
867 int vector = attr->comp_vector;
868 int entries = attr->cqe;
876 DP_DEBUG(dev, QEDR_MSG_INIT,
877 "create_cq: called from %s. entries=%d, vector=%d\n",
878 udata ? "User Lib" : "Kernel", entries, vector);
880 if (entries > QEDR_MAX_CQES) {
882 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
883 entries, QEDR_MAX_CQES);
884 return ERR_PTR(-EINVAL);
887 chain_entries = qedr_align_cq_entries(entries);
888 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
890 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
892 return ERR_PTR(-ENOMEM);
895 memset(&ureq, 0, sizeof(ureq));
896 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
898 "create cq: problem copying data from user space\n");
904 "create cq: cannot create a cq with 0 entries\n");
908 cq->cq_type = QEDR_CQ_TYPE_USER;
910 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
911 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
915 pbl_ptr = cq->q.pbl_tbl->pa;
916 page_cnt = cq->q.pbl_info.num_pbes;
918 cq->ibcq.cqe = chain_entries;
920 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
922 rc = dev->ops->common->chain_alloc(dev->cdev,
923 QED_CHAIN_USE_TO_CONSUME,
925 QED_CHAIN_CNT_TYPE_U32,
927 sizeof(union rdma_cqe),
932 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
933 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
934 cq->ibcq.cqe = cq->pbl.capacity;
937 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
940 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
945 cq->sig = QEDR_CQ_MAGIC_NUMBER;
946 spin_lock_init(&cq->cq_lock);
949 rc = qedr_copy_cq_uresp(dev, cq, udata);
953 /* Generate doorbell address. */
954 cq->db_addr = dev->db_addr +
955 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
956 cq->db.data.icid = cq->icid;
957 cq->db.data.params = DB_AGG_CMD_SET <<
958 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
960 /* Point to the very last element; passing it will flip the toggle. */
961 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
962 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
963 cq->latest_cqe = NULL;
965 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
968 DP_DEBUG(dev, QEDR_MSG_CQ,
969 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
970 cq->icid, cq, params.cq_size);
975 destroy_iparams.icid = cq->icid;
976 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
980 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
982 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
985 ib_umem_release(cq->q.umem);
988 return ERR_PTR(-EINVAL);
991 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
993 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
994 struct qedr_cq *cq = get_qedr_cq(ibcq);
996 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1001 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1002 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
1004 int qedr_destroy_cq(struct ib_cq *ibcq)
1006 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1007 struct qed_rdma_destroy_cq_out_params oparams;
1008 struct qed_rdma_destroy_cq_in_params iparams;
1009 struct qedr_cq *cq = get_qedr_cq(ibcq);
1013 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1017 /* GSI CQs are handled by the driver, so they don't exist in the FW */
1018 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1021 iparams.icid = cq->icid;
1022 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1026 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1028 if (ibcq->uobject && ibcq->uobject->context) {
1029 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1030 ib_umem_release(cq->q.umem);
1033 /* We don't want the IRQ handler to handle a non-existing CQ so we
1034 * wait until all CNQ interrupts, if any, are received. This will always
1035 * happen and will always happen very fast. If not, then a serious error
1036 * has occurred. That is why we can use a long delay.
1037 * We spin for a short time so we don't lose time on context switching
1038 * in case all the completions are handled in that span. Otherwise
1039 * we sleep for a while and check again. Since the CNQ may be
1040 * associated with (only) the current CPU we use msleep to allow the
1041 * current CPU to be freed.
1042 * The CNQ notification is increased in qedr_irq_handler().
1044 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1045 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1046 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1050 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1051 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1052 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1056 if (oparams.num_cq_notif != cq->cnq_notif)
1059 /* Note that we don't need to have explicit code to wait for the
1060 * completion of the event handler because it is invoked from the EQ.
1061 * Since the destroy CQ ramrod has also been received on the EQ we can
1062 * be certain that there's no event handler in process.
1073 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1074 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
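/* Resolve the SGID referenced by the AH attributes into qed modify-QP
 * parameters: RoCE mode (v1, v2/IPv4 or v2/IPv6), sgid/dgid and the
 * VLAN derived from the GID's net device.
 */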
1079 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1080 struct ib_qp_attr *attr,
1082 struct qed_rdma_modify_qp_in_params
1085 enum rdma_network_type nw_type;
1086 struct ib_gid_attr gid_attr;
1087 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1093 rc = ib_get_cached_gid(ibqp->device,
1094 rdma_ah_get_port_num(&attr->ah_attr),
1095 grh->sgid_index, &gid, &gid_attr);
1099 if (!memcmp(&gid, &zgid, sizeof(gid)))
1102 if (gid_attr.ndev) {
1103 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1105 dev_put(gid_attr.ndev);
1106 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1108 case RDMA_NETWORK_IPV6:
1109 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1110 sizeof(qp_params->sgid));
1111 memcpy(&qp_params->dgid.bytes[0],
1113 sizeof(qp_params->dgid));
1114 qp_params->roce_mode = ROCE_V2_IPV6;
1115 SET_FIELD(qp_params->modify_flags,
1116 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1118 case RDMA_NETWORK_IB:
1119 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1120 sizeof(qp_params->sgid));
1121 memcpy(&qp_params->dgid.bytes[0],
1123 sizeof(qp_params->dgid));
1124 qp_params->roce_mode = ROCE_V1;
1126 case RDMA_NETWORK_IPV4:
1127 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1128 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1129 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1130 qp_params->sgid.ipv4_addr = ipv4_addr;
1132 qedr_get_ipv4_from_gid(grh->dgid.raw);
1133 qp_params->dgid.ipv4_addr = ipv4_addr;
1134 SET_FIELD(qp_params->modify_flags,
1135 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1136 qp_params->roce_mode = ROCE_V2_IPV4;
1141 for (i = 0; i < 4; i++) {
1142 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1143 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1146 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1147 qp_params->vlan_id = 0;
1152 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1153 struct ib_qp_init_attr *attrs)
1155 struct qedr_device_attr *qattr = &dev->attr;
1157 /* QP0... attrs->qp_type == IB_QPT_GSI */
1158 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1159 DP_DEBUG(dev, QEDR_MSG_QP,
1160 "create qp: unsupported qp type=0x%x requested\n",
1165 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1167 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1168 attrs->cap.max_send_wr, qattr->max_sqe);
1172 if (attrs->cap.max_inline_data > qattr->max_inline) {
1174 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1175 attrs->cap.max_inline_data, qattr->max_inline);
1179 if (attrs->cap.max_send_sge > qattr->max_sge) {
1181 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1182 attrs->cap.max_send_sge, qattr->max_sge);
1186 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1188 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1189 attrs->cap.max_recv_sge, qattr->max_sge);
1193 /* Unprivileged user space cannot create special QP */
1194 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1196 "create qp: userspace can't create special QPs of type=0x%x\n",
1204 static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1207 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1208 uresp->rq_icid = qp->icid;
1211 static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1214 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1215 uresp->sq_icid = qp->icid + 1;
1218 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1219 struct qedr_qp *qp, struct ib_udata *udata)
1221 struct qedr_create_qp_uresp uresp;
1224 memset(&uresp, 0, sizeof(uresp));
1225 qedr_copy_sq_uresp(&uresp, qp);
1226 qedr_copy_rq_uresp(&uresp, qp);
1228 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1229 uresp.qp_id = qp->qp_id;
1231 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1234 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1240 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1243 struct ib_qp_init_attr *attrs)
1245 spin_lock_init(&qp->q_lock);
1247 qp->qp_type = attrs->qp_type;
1248 qp->max_inline_data = attrs->cap.max_inline_data;
1249 qp->sq.max_sges = attrs->cap.max_send_sge;
1250 qp->state = QED_ROCE_QP_STATE_RESET;
1251 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1252 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1253 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1255 qp->rq.max_sges = attrs->cap.max_recv_sge;
1257 DP_DEBUG(dev, QEDR_MSG_QP,
1258 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1259 qp->rq.max_sges, qp->rq_cq->icid);
1260 DP_DEBUG(dev, QEDR_MSG_QP,
1261 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1262 pd->pd_id, qp->qp_type, qp->max_inline_data,
1263 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1264 DP_DEBUG(dev, QEDR_MSG_QP,
1265 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1266 qp->sq.max_sges, qp->sq_cq->icid);
1269 static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1271 qp->sq.db = dev->db_addr +
1272 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1273 qp->sq.db_data.data.icid = qp->icid + 1;
1274 qp->rq.db = dev->db_addr +
1275 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1276 qp->rq.db_data.data.icid = qp->icid;
1280 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1283 struct ib_qp_init_attr *attrs,
1284 bool fmr_and_reserved_lkey,
1285 struct qed_rdma_create_qp_in_params *params)
1287 /* QP handle to be written in an async event */
1288 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1289 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1291 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1292 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1293 params->pd = pd->pd_id;
1294 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1295 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1296 params->stats_queue = 0;
1297 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1299 params->use_srq = false;
1302 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1304 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1313 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1316 static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1319 ib_umem_release(qp->usq.umem);
1320 qp->usq.umem = NULL;
1323 ib_umem_release(qp->urq.umem);
1324 qp->urq.umem = NULL;
1327 static int qedr_create_user_qp(struct qedr_dev *dev,
1330 struct ib_udata *udata,
1331 struct ib_qp_init_attr *attrs)
1333 struct qed_rdma_create_qp_in_params in_params;
1334 struct qed_rdma_create_qp_out_params out_params;
1335 struct qedr_pd *pd = get_qedr_pd(ibpd);
1336 struct ib_ucontext *ib_ctx = NULL;
1337 struct qedr_ucontext *ctx = NULL;
1338 struct qedr_create_qp_ureq ureq;
1341 ib_ctx = ibpd->uobject->context;
1342 ctx = get_qedr_ucontext(ib_ctx);
1344 memset(&ureq, 0, sizeof(ureq));
1345 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1347 DP_ERR(dev, "Problem copying data from user space\n");
1351 /* SQ - read access only (0), dma sync not required (0) */
1352 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
1357 /* RQ - read access only (0), dma sync not required (0) */
1358 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
1364 memset(&in_params, 0, sizeof(in_params));
1365 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1366 in_params.qp_handle_lo = ureq.qp_handle_lo;
1367 in_params.qp_handle_hi = ureq.qp_handle_hi;
1368 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1369 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1370 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1371 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1373 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1374 &in_params, &out_params);
1381 qp->qp_id = out_params.qp_id;
1382 qp->icid = out_params.icid;
1384 rc = qedr_copy_qp_uresp(dev, qp, udata);
1388 qedr_qp_user_print(dev, qp);
1392 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1394 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1397 qedr_cleanup_user(dev, qp);
1402 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1404 struct qed_rdma_create_qp_in_params *in_params,
1405 u32 n_sq_elems, u32 n_rq_elems)
1407 struct qed_rdma_create_qp_out_params out_params;
1410 rc = dev->ops->common->chain_alloc(dev->cdev,
1411 QED_CHAIN_USE_TO_PRODUCE,
1413 QED_CHAIN_CNT_TYPE_U32,
1415 QEDR_SQE_ELEMENT_SIZE,
1421 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1422 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1424 rc = dev->ops->common->chain_alloc(dev->cdev,
1425 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1427 QED_CHAIN_CNT_TYPE_U32,
1429 QEDR_RQE_ELEMENT_SIZE,
1434 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1435 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1437 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1438 in_params, &out_params);
1443 qp->qp_id = out_params.qp_id;
1444 qp->icid = out_params.icid;
1446 qedr_set_roce_db_info(dev, qp);
1451 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
1453 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1454 kfree(qp->wqe_wr_id);
1456 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1457 kfree(qp->rqe_wr_id);
1460 static int qedr_create_kernel_qp(struct qedr_dev *dev,
1463 struct ib_qp_init_attr *attrs)
1465 struct qed_rdma_create_qp_in_params in_params;
1466 struct qedr_pd *pd = get_qedr_pd(ibpd);
1472 memset(&in_params, 0, sizeof(in_params));
1474 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1475 * the ring. The ring should allow at least a single WR, even if the
1476 * user requested none, due to allocation issues.
1477 * We should add an extra WR since the prod and cons indices of
1478 * wqe_wr_id are managed in such a way that the WQ is considered full
1479 * when (prod+1)%max_wr==cons. We currently don't do that because we
1480 * double the number of entries due to an iSER issue that pushes far more
1481 * WRs than indicated. If we decline its ib_post_send() then we get
1482 * error prints in dmesg that we'd like to avoid.
1484 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1487 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1489 if (!qp->wqe_wr_id) {
1490 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1494 /* QP handle to be written in CQE */
1495 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1496 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
1498 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1499 * the ring. The ring should allow at least a single WR, even if the
1500 * user requested none, due to allocation issues.
1502 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1504 /* Allocate driver internal RQ array */
1505 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1507 if (!qp->rqe_wr_id) {
1509 "create qp: failed RQ shadow memory allocation\n");
1510 kfree(qp->wqe_wr_id);
1514 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
1516 n_sq_entries = attrs->cap.max_send_wr;
1517 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1518 n_sq_entries = max_t(u32, n_sq_entries, 1);
1519 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1521 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1523 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1524 n_sq_elems, n_rq_elems);
1526 qedr_cleanup_kernel(dev, qp);
1531 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1532 struct ib_qp_init_attr *attrs,
1533 struct ib_udata *udata)
1535 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1536 struct qedr_pd *pd = get_qedr_pd(ibpd);
1541 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1542 udata ? "user library" : "kernel", pd);
1544 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1549 return ERR_PTR(-EINVAL);
1551 DP_DEBUG(dev, QEDR_MSG_QP,
1552 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1553 udata ? "user library" : "kernel", attrs->event_handler, pd,
1554 get_qedr_cq(attrs->send_cq),
1555 get_qedr_cq(attrs->send_cq)->icid,
1556 get_qedr_cq(attrs->recv_cq),
1557 get_qedr_cq(attrs->recv_cq)->icid);
1559 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1561 DP_ERR(dev, "create qp: failed allocating memory\n");
1562 return ERR_PTR(-ENOMEM);
1565 qedr_set_common_qp_params(dev, qp, pd, attrs);
1567 if (attrs->qp_type == IB_QPT_GSI) {
1568 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1575 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1577 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
1582 qp->ibqp.qp_num = qp->qp_id;
1589 return ERR_PTR(-EFAULT);
1592 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1595 case QED_ROCE_QP_STATE_RESET:
1596 return IB_QPS_RESET;
1597 case QED_ROCE_QP_STATE_INIT:
1599 case QED_ROCE_QP_STATE_RTR:
1601 case QED_ROCE_QP_STATE_RTS:
1603 case QED_ROCE_QP_STATE_SQD:
1605 case QED_ROCE_QP_STATE_ERR:
1607 case QED_ROCE_QP_STATE_SQE:
1613 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1614 enum ib_qp_state qp_state)
1618 return QED_ROCE_QP_STATE_RESET;
1620 return QED_ROCE_QP_STATE_INIT;
1622 return QED_ROCE_QP_STATE_RTR;
1624 return QED_ROCE_QP_STATE_RTS;
1626 return QED_ROCE_QP_STATE_SQD;
1628 return QED_ROCE_QP_STATE_ERR;
1630 return QED_ROCE_QP_STATE_ERR;
1634 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1636 qed_chain_reset(&qph->pbl);
1640 qph->db_data.data.value = cpu_to_le16(0);
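/* Validate the requested RoCE QP state transition and apply any side
 * effects it requires (e.g. ringing the RQ doorbell on INIT->RTR in
 * case buffers were posted before the move to RTR).
 */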
1643 static int qedr_update_qp_state(struct qedr_dev *dev,
1645 enum qed_roce_qp_state new_state)
1649 if (new_state == qp->state)
1652 switch (qp->state) {
1653 case QED_ROCE_QP_STATE_RESET:
1654 switch (new_state) {
1655 case QED_ROCE_QP_STATE_INIT:
1656 qp->prev_wqe_size = 0;
1657 qedr_reset_qp_hwq_info(&qp->sq);
1658 qedr_reset_qp_hwq_info(&qp->rq);
1665 case QED_ROCE_QP_STATE_INIT:
1666 switch (new_state) {
1667 case QED_ROCE_QP_STATE_RTR:
1668 /* Update doorbell (in case post_recv was
1669 * done before move to RTR)
1672 writel(qp->rq.db_data.raw, qp->rq.db);
1673 /* Make sure write takes effect */
1676 case QED_ROCE_QP_STATE_ERR:
1679 /* Invalid state change. */
1684 case QED_ROCE_QP_STATE_RTR:
1686 switch (new_state) {
1687 case QED_ROCE_QP_STATE_RTS:
1689 case QED_ROCE_QP_STATE_ERR:
1692 /* Invalid state change. */
1697 case QED_ROCE_QP_STATE_RTS:
1699 switch (new_state) {
1700 case QED_ROCE_QP_STATE_SQD:
1702 case QED_ROCE_QP_STATE_ERR:
1705 /* Invalid state change. */
1710 case QED_ROCE_QP_STATE_SQD:
1712 switch (new_state) {
1713 case QED_ROCE_QP_STATE_RTS:
1714 case QED_ROCE_QP_STATE_ERR:
1717 /* Invalid state change. */
1722 case QED_ROCE_QP_STATE_ERR:
1724 switch (new_state) {
1725 case QED_ROCE_QP_STATE_RESET:
1726 if ((qp->rq.prod != qp->rq.cons) ||
1727 (qp->sq.prod != qp->sq.cons)) {
1729 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1730 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1748 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1749 int attr_mask, struct ib_udata *udata)
1751 struct qedr_qp *qp = get_qedr_qp(ibqp);
1752 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1753 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1754 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1755 enum ib_qp_state old_qp_state, new_qp_state;
1758 DP_DEBUG(dev, QEDR_MSG_QP,
1759 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1762 old_qp_state = qedr_get_ibqp_state(qp->state);
1763 if (attr_mask & IB_QP_STATE)
1764 new_qp_state = attr->qp_state;
1766 new_qp_state = old_qp_state;
1768 if (!ib_modify_qp_is_ok
1769 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1770 IB_LINK_LAYER_ETHERNET)) {
1772 "modify qp: invalid attribute mask=0x%x specified for\n"
1773 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1774 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1780 /* Translate the masks... */
1781 if (attr_mask & IB_QP_STATE) {
1782 SET_FIELD(qp_params.modify_flags,
1783 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1784 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1787 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1788 qp_params.sqd_async = true;
1790 if (attr_mask & IB_QP_PKEY_INDEX) {
1791 SET_FIELD(qp_params.modify_flags,
1792 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1793 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1798 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1801 if (attr_mask & IB_QP_QKEY)
1802 qp->qkey = attr->qkey;
1804 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1805 SET_FIELD(qp_params.modify_flags,
1806 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1807 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1808 IB_ACCESS_REMOTE_READ;
1809 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1810 IB_ACCESS_REMOTE_WRITE;
1811 qp_params.incoming_atomic_en = attr->qp_access_flags &
1812 IB_ACCESS_REMOTE_ATOMIC;
1815 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1816 if (attr_mask & IB_QP_PATH_MTU) {
1817 if (attr->path_mtu < IB_MTU_256 ||
1818 attr->path_mtu > IB_MTU_4096) {
1819 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1823 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1824 ib_mtu_enum_to_int(iboe_get_mtu
1830 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1831 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1834 SET_FIELD(qp_params.modify_flags,
1835 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1837 qp_params.traffic_class_tos = grh->traffic_class;
1838 qp_params.flow_label = grh->flow_label;
1839 qp_params.hop_limit_ttl = grh->hop_limit;
1841 qp->sgid_idx = grh->sgid_index;
1843 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1846 "modify qp: problems with GID index %d (rc=%d)\n",
1847 grh->sgid_index, rc);
1851 rc = qedr_get_dmac(dev, &attr->ah_attr,
1852 qp_params.remote_mac_addr);
1856 qp_params.use_local_mac = true;
1857 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1859 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1860 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1861 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1862 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1863 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1864 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1865 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1866 qp_params.remote_mac_addr);
1868 qp_params.mtu = qp->mtu;
1869 qp_params.lb_indication = false;
1872 if (!qp_params.mtu) {
1873 /* Stay with current MTU */
1875 qp_params.mtu = qp->mtu;
1878 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1881 if (attr_mask & IB_QP_TIMEOUT) {
1882 SET_FIELD(qp_params.modify_flags,
1883 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1885 qp_params.ack_timeout = attr->timeout;
1886 if (attr->timeout) {
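/* According to IBTA, the local ack timeout is 4.096 * 2^timeout
 * usec; 4096 * (1 << timeout) below is that value in nanoseconds,
 * so dividing by 1000 twice yields milliseconds.
 */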
1889 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1890 /* FW requires [msec] */
1891 qp_params.ack_timeout = temp;
1894 qp_params.ack_timeout = 0;
1897 if (attr_mask & IB_QP_RETRY_CNT) {
1898 SET_FIELD(qp_params.modify_flags,
1899 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1900 qp_params.retry_cnt = attr->retry_cnt;
1903 if (attr_mask & IB_QP_RNR_RETRY) {
1904 SET_FIELD(qp_params.modify_flags,
1905 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1906 qp_params.rnr_retry_cnt = attr->rnr_retry;
1909 if (attr_mask & IB_QP_RQ_PSN) {
1910 SET_FIELD(qp_params.modify_flags,
1911 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1912 qp_params.rq_psn = attr->rq_psn;
1913 qp->rq_psn = attr->rq_psn;
1916 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1917 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1920 "unsupported max_rd_atomic=%d, supported=%d\n",
1921 attr->max_rd_atomic,
1922 dev->attr.max_qp_req_rd_atomic_resc);
1926 SET_FIELD(qp_params.modify_flags,
1927 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1928 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1931 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1932 SET_FIELD(qp_params.modify_flags,
1933 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1934 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1937 if (attr_mask & IB_QP_SQ_PSN) {
1938 SET_FIELD(qp_params.modify_flags,
1939 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1940 qp_params.sq_psn = attr->sq_psn;
1941 qp->sq_psn = attr->sq_psn;
1944 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1945 if (attr->max_dest_rd_atomic >
1946 dev->attr.max_qp_resp_rd_atomic_resc) {
1948 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1949 attr->max_dest_rd_atomic,
1950 dev->attr.max_qp_resp_rd_atomic_resc);
1956 SET_FIELD(qp_params.modify_flags,
1957 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1958 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1961 if (attr_mask & IB_QP_DEST_QPN) {
1962 SET_FIELD(qp_params.modify_flags,
1963 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1965 qp_params.dest_qp = attr->dest_qp_num;
1966 qp->dest_qp_num = attr->dest_qp_num;
1969 if (qp->qp_type != IB_QPT_GSI)
1970 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1971 qp->qed_qp, &qp_params);
1973 if (attr_mask & IB_QP_STATE) {
1974 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1975 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
1976 qp->state = qp_params.new_state;
1983 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1985 int ib_qp_acc_flags = 0;
1987 if (params->incoming_rdma_write_en)
1988 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1989 if (params->incoming_rdma_read_en)
1990 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1991 if (params->incoming_atomic_en)
1992 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1993 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1994 return ib_qp_acc_flags;
1997 int qedr_query_qp(struct ib_qp *ibqp,
1998 struct ib_qp_attr *qp_attr,
1999 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2001 struct qed_rdma_query_qp_out_params params;
2002 struct qedr_qp *qp = get_qedr_qp(ibqp);
2003 struct qedr_dev *dev = qp->dev;
2006 memset(&params, 0, sizeof(params));
2008 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2012 memset(qp_attr, 0, sizeof(*qp_attr));
2013 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2015 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2016 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2017 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2018 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2019 qp_attr->rq_psn = params.rq_psn;
2020 qp_attr->sq_psn = params.sq_psn;
2021 qp_attr->dest_qp_num = params.dest_qp;
2023 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2025 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2026 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2027 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2028 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2029 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2030 qp_init_attr->cap = qp_attr->cap;
2032 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2033 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2034 params.flow_label, qp->sgid_idx,
2035 params.hop_limit_ttl, params.traffic_class_tos);
2036 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2037 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2038 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2039 qp_attr->timeout = params.timeout;
2040 qp_attr->rnr_retry = params.rnr_retry;
2041 qp_attr->retry_cnt = params.retry_cnt;
2042 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2043 qp_attr->pkey_index = params.pkey_index;
2044 qp_attr->port_num = 1;
2045 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2046 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2047 qp_attr->alt_pkey_index = 0;
2048 qp_attr->alt_port_num = 0;
2049 qp_attr->alt_timeout = 0;
2050 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2052 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2053 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2054 qp_attr->max_rd_atomic = params.max_rd_atomic;
2055 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2057 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2058 qp_attr->cap.max_inline_data);
2064 int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2068 if (qp->qp_type != IB_QPT_GSI) {
2069 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2074 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2075 qedr_cleanup_user(dev, qp);
2077 qedr_cleanup_kernel(dev, qp);
2082 int qedr_destroy_qp(struct ib_qp *ibqp)
2084 struct qedr_qp *qp = get_qedr_qp(ibqp);
2085 struct qedr_dev *dev = qp->dev;
2086 struct ib_qp_attr attr;
2090 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2093 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2094 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2095 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2097 attr.qp_state = IB_QPS_ERR;
2098 attr_mask |= IB_QP_STATE;
2100 /* Change the QP state to ERROR */
2101 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2104 if (qp->qp_type == IB_QPT_GSI)
2105 qedr_destroy_gsi_qp(dev);
2107 qedr_free_qp_resources(dev, qp);
2114 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
2115 struct ib_udata *udata)
2119 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2121 return ERR_PTR(-ENOMEM);
2128 int qedr_destroy_ah(struct ib_ah *ibah)
2130 struct qedr_ah *ah = get_qedr_ah(ibah);
2136 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2138 struct qedr_pbl *pbl, *tmp;
2140 if (info->pbl_table)
2141 list_add_tail(&info->pbl_table->list_entry,
2142 &info->free_pbl_list);
2144 if (!list_empty(&info->inuse_pbl_list))
2145 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2147 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2148 list_del(&pbl->list_entry);
2149 qedr_free_pbl(dev, &info->pbl_info, pbl);
2153 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2154 size_t page_list_len, bool two_layered)
2156 struct qedr_pbl *tmp;
2159 INIT_LIST_HEAD(&info->free_pbl_list);
2160 INIT_LIST_HEAD(&info->inuse_pbl_list);
2162 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2163 page_list_len, two_layered);
2167 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2168 if (IS_ERR(info->pbl_table)) {
2169 rc = PTR_ERR(info->pbl_table);
2173 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2174 &info->pbl_table->pa);
2176 /* In the usual case we use 2 PBLs, so we add one to the free
2177 * list and allocate another one
2179 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2181 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2185 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2187 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2191 free_mr_info(dev, info);
2196 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2197 u64 usr_addr, int acc, struct ib_udata *udata)
2199 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2204 pd = get_qedr_pd(ibpd);
2205 DP_DEBUG(dev, QEDR_MSG_MR,
2206 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2207 pd->pd_id, start, len, usr_addr, acc);
2209 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2210 return ERR_PTR(-EINVAL);
2212 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2216 mr->type = QEDR_MR_USER;
2218 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2219 if (IS_ERR(mr->umem)) {
2224 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2228 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2229 &mr->info.pbl_info);
2231 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2233 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2237 /* Index only, 18 bit long, lkey = itid << 8 | key */
2238 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2240 mr->hw_mr.pd = pd->pd_id;
2241 mr->hw_mr.local_read = 1;
2242 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2243 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2244 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2245 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2246 mr->hw_mr.mw_bind = false;
2247 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2248 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2249 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2250 mr->hw_mr.page_size_log = mr->umem->page_shift;
2251 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2252 mr->hw_mr.length = len;
2253 mr->hw_mr.vaddr = usr_addr;
2254 mr->hw_mr.zbva = false;
2255 mr->hw_mr.phy_mr = false;
2256 mr->hw_mr.dma_mr = false;
2258 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2260 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2264 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2265 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2266 mr->hw_mr.remote_atomic)
2267 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2269 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2274 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2276 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2282 int qedr_dereg_mr(struct ib_mr *ib_mr)
2284 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2285 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2288 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2292 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2294 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2295 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2297 /* It could be user-registered memory. */
2299 ib_umem_release(mr->umem);
2306 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2307 int max_page_list_len)
2309 struct qedr_pd *pd = get_qedr_pd(ibpd);
2310 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2314 DP_DEBUG(dev, QEDR_MSG_MR,
2315 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2318 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2323 mr->type = QEDR_MR_FRMR;
2325 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2329 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2331 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2335 /* Index only, 18 bit long, lkey = itid << 8 | key */
2336 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2338 mr->hw_mr.pd = pd->pd_id;
2339 mr->hw_mr.local_read = 1;
2340 mr->hw_mr.local_write = 0;
2341 mr->hw_mr.remote_read = 0;
2342 mr->hw_mr.remote_write = 0;
2343 mr->hw_mr.remote_atomic = 0;
2344 mr->hw_mr.mw_bind = false;
2345 mr->hw_mr.pbl_ptr = 0;
2346 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2347 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2349 mr->hw_mr.length = 0;
2350 mr->hw_mr.vaddr = 0;
2351 mr->hw_mr.zbva = false;
2352 mr->hw_mr.phy_mr = true;
2353 mr->hw_mr.dma_mr = false;
2355 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2357 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2361 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2362 mr->ibmr.rkey = mr->ibmr.lkey;
2364 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2368 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2374 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2375 enum ib_mr_type mr_type, u32 max_num_sg)
2377 struct qedr_dev *dev;
2380 if (mr_type != IB_MR_TYPE_MEM_REG)
2381 return ERR_PTR(-EINVAL);
2383 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2386 return ERR_PTR(-EINVAL);
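/* ib_sg_to_pages() callback: store one page address as a little-endian
 * PBE, indexing into the correct PBL page by mr->npages.
 */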
2393 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2395 struct qedr_mr *mr = get_qedr_mr(ibmr);
2396 struct qedr_pbl *pbl_table;
2397 struct regpair *pbe;
2400 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2401 DP_ERR(mr->dev, "qedr_set_page fails when npages=%d\n", mr->npages);
2405 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2408 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2409 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2410 pbe = (struct regpair *)pbl_table->va;
2411 pbe += mr->npages % pbes_in_page;
2412 pbe->lo = cpu_to_le32((u32)addr);
2413 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
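/*
 * Illustration of the PBL indexing above: with a 4KB pbl_size there are
 * 4096 / sizeof(u64) = 512 PBEs per PBL page, so page address number 700
 * lands in pbl_table[700 / 512] = pbl_table[1] at PBE offset
 * 700 % 512 = 188.
 */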
static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
{
	int work = info->completed - info->completed_handled - 1;

	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
		struct qedr_pbl *pbl;

		/* Free all the page lists that can be freed (all the ones
		 * that were invalidated), under the assumption that if an
		 * FMR completed successfully, any invalidate operation
		 * posted before it also completed successfully.
		 */
		pbl = list_first_entry(&info->inuse_pbl_list,
				       struct qedr_pbl, list_entry);
		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
		info->completed_handled++;
	}
}
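/*
 * Example of the bookkeeping above: if 5 FMR work requests have completed
 * (info->completed == 5) and 2 were already handled, work is 5 - 2 - 1 = 2,
 * so up to two in-use PBLs are recycled; the "- 1" conservatively leaves
 * the PBL of the most recent completion untouched.
 */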
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);

	mr->npages = 0;

	handle_completed_mrs(mr->dev, &mr->info);
	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
}
struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = QEDR_MR_DMA;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
		goto err1;
	}

	/* index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.dma_mr = true;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err2;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
	return &mr->ibmr;

err2:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
	kfree(mr);
	return ERR_PTR(rc);
}
static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
{
	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
}
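/*
 * Classic ring-buffer convention: one slot is sacrificed so that
 * prod == cons means empty while (prod + 1) % max_wr == cons means full;
 * e.g. with max_wr = 4, at most 3 WRs can be outstanding.
 */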
static int sge_data_len(struct ib_sge *sg_list, int num_sge)
{
	int i, len = 0;

	for (i = 0; i < num_sge; i++)
		len += sg_list[i].length;

	return len;
}
static void swap_wqe_data64(u64 *p)
{
	int i;

	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
		*p = cpu_to_be64(cpu_to_le64(*p));
}
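/*
 * cpu_to_be64(cpu_to_le64(x)) is an unconditional byte swap regardless of
 * host endianness: on little-endian hosts cpu_to_le64() is a no-op and
 * cpu_to_be64() swaps, while on big-endian hosts the roles reverse. The
 * net effect is a byte swap of every 64-bit word of the WQE segment.
 */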
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
				       struct qedr_qp *qp, u8 *wqe_size,
				       struct ib_send_wr *wr,
				       struct ib_send_wr **bad_wr, u8 *bits,
				       u8 bit)
{
	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
	char *seg_prt, *wqe;
	int i, seg_siz;

	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
		*bad_wr = wr;
		return 0;
	}

	if (!data_size)
		return data_size;

	*bits |= bit;

	seg_prt = NULL;
	wqe = NULL;
	seg_siz = 0;

	/* Copy data inline */
	for (i = 0; i < wr->num_sge; i++) {
		u32 len = wr->sg_list[i].length;
		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;

		while (len > 0) {
			u32 cur;

			/* New segment required */
			if (!seg_siz) {
				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
				seg_prt = wqe;
				seg_siz = sizeof(struct rdma_sq_common_wqe);
				(*wqe_size)++;
			}

			/* Calculate currently allowed length */
			cur = min_t(u32, len, seg_siz);
			memcpy(seg_prt, src, cur);

			/* Update segment variables */
			seg_prt += cur;
			seg_siz -= cur;

			/* Update sge variables */
			src += cur;
			len -= cur;

			/* Swap fully-completed segments */
			if (!seg_siz)
				swap_wqe_data64((u64 *)wqe);
		}
	}

	/* swap last not completed segment */
	if (seg_siz)
		swap_wqe_data64((u64 *)wqe);

	return data_size;
}
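/*
 * Worked example of the segmentation above, assuming 16-byte WQE chain
 * elements: a 24-byte inline payload fills one full element (16 bytes,
 * swapped immediately once seg_siz reaches 0) and 8 bytes of a second
 * element, which is then swapped by the trailing "last not completed
 * segment" fix-up.
 */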
#define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->flags = cpu_to_le32(vflags);		\
	} while (0)

#define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
	do {							\
		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
		(hdr)->num_sges = num_sge;			\
	} while (0)

#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->l_key = cpu_to_le32(vlkey);		\
	} while (0)
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
				struct ib_send_wr *wr)
{
	u32 data_size = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);

		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
		sge->length = cpu_to_le32(wr->sg_list[i].length);
		data_size += wr->sg_list[i].length;
	}

	if (wqe_size)
		*wqe_size += wr->num_sge;

	return data_size;
}
static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_rdma_wqe_1st *rwqe,
				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);

	if (wr->send_flags & IB_SEND_INLINE &&
	    (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
	     wr->opcode == IB_WR_RDMA_WRITE)) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
						   bad_wr, &rwqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
}
static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_send_wqe_1st *swqe,
				     struct rdma_sq_send_wqe_2st *swqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	memset(swqe2, 0, sizeof(*swqe2));
	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
						   bad_wr, &swqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
}
static int qedr_prepare_reg(struct qedr_qp *qp,
			    struct rdma_sq_fmr_wqe_1st *fwqe1,
			    struct ib_reg_wr *wr)
{
	struct qedr_mr *mr = get_qedr_mr(wr->mr);
	struct rdma_sq_fmr_wqe_2nd *fwqe2;

	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
	fwqe1->l_key = wr->key;

	fwqe2->access_ctrl = 0;

	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
		   !!(wr->access & IB_ACCESS_REMOTE_READ));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
	fwqe2->fmr_ctrl = 0;

	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
		   ilog2(mr->ibmr.page_size) - 12);

	fwqe2->length_hi = 0;
	fwqe2->length_lo = mr->ibmr.length;
	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);

	qp->wqe_wr_id[qp->sq.prod].mr = mr;

	return 0;
}
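/*
 * The PAGE_SIZE_LOG field above is expressed as log2 of the MR page size
 * relative to 4KB: a 4KB page size maps to ilog2(4096) - 12 = 0, a 2MB
 * page size to 21 - 12 = 9.
 */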
static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_INV:
		return IB_WC_SEND;
	case IB_WR_RDMA_READ:
		return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_WC_FETCH_ADD;
	case IB_WR_REG_MR:
		return IB_WC_REG_MR;
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	default:
		return IB_WC_SEND;
	}
}
static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
	int wq_is_full, err_wr, pbl_is_full;
	struct qedr_dev *dev = qp->dev;

	/* prevent SQ overflow and/or processing of a bad WR */
	err_wr = wr->num_sge > qp->sq.max_sges;
	wq_is_full = qedr_wq_is_full(&qp->sq);
	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	if (wq_is_full || err_wr || pbl_is_full) {
		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
			DP_ERR(dev,
			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
		}

		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
			DP_ERR(dev,
			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
		}

		if (pbl_is_full &&
		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
			DP_ERR(dev,
			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
		}
		return false;
	}
	return true;
}
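/*
 * The err_bitmap pattern above is a one-shot log limiter: the first
 * occurrence of each overflow condition is reported, then the
 * corresponding bit suppresses further prints so a misbehaving consumer
 * cannot flood the kernel log on every post.
 */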
static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			    struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct rdma_sq_atomic_wqe_1st *awqe1;
	struct rdma_sq_atomic_wqe_2nd *awqe2;
	struct rdma_sq_atomic_wqe_3rd *awqe3;
	struct rdma_sq_send_wqe_2st *swqe2;
	struct rdma_sq_local_inv_wqe *iwqe;
	struct rdma_sq_rdma_wqe_2nd *rwqe2;
	struct rdma_sq_send_wqe_1st *swqe;
	struct rdma_sq_rdma_wqe_1st *rwqe;
	struct rdma_sq_fmr_wqe_1st *fwqe1;
	struct rdma_sq_common_wqe *wqe;
	u32 length;
	int rc = 0;
	bool comp;

	if (!qedr_can_post_send(qp, wr)) {
		*bad_wr = wr;
		return -ENOMEM;
	}

	wqe = qed_chain_produce(&qp->sq.pbl);
	qp->wqe_wr_id[qp->sq.prod].signaled =
		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;

	wqe->flags = 0;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
		   !!(wr->send_flags & IB_SEND_SOLICITED));
	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
		   !!(wr->send_flags & IB_SEND_FENCE));
	wqe->prev_wqe_size = qp->prev_wqe_size;

	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);

		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;

		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		swqe->wqe_size = 2;
		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_WRITE:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_READ_WITH_INV:
		DP_ERR(dev,
		       "RDMA READ WITH INVALIDATE not supported\n");
		*bad_wr = wr;
		rc = -EINVAL;
		break;

	case IB_WR_RDMA_READ:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
		awqe1->wqe_size = 4;

		awqe2 = qed_chain_produce(&qp->sq.pbl);
		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);

		awqe3 = qed_chain_produce(&qp->sq.pbl);

		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->compare_add);
		} else {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->swap);
			DMA_REGPAIR_LE(awqe3->cmp_data,
				       atomic_wr(wr)->compare_add);
		}

		qedr_prepare_sq_sges(qp, NULL, wr);

		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
		qp->prev_wqe_size = awqe1->wqe_size;
		break;

	case IB_WR_LOCAL_INV:
		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
		iwqe->wqe_size = 1;

		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
		iwqe->inv_l_key = wr->ex.invalidate_rkey;
		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
		qp->prev_wqe_size = iwqe->wqe_size;
		break;
	case IB_WR_REG_MR:
		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
		fwqe1->wqe_size = 2;

		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
		if (rc) {
			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
			*bad_wr = wr;
			break;
		}

		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
		qp->prev_wqe_size = fwqe1->wqe_size;
		break;
	default:
		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
		rc = -EINVAL;
		*bad_wr = wr;
		break;
	}

	if (*bad_wr) {
		u16 value;

		/* Restore prod to its position before
		 * this WR was processed
		 */
		value = le16_to_cpu(qp->sq.db_data.data.value);
		qed_chain_set_prod(&qp->sq.pbl, value, wqe);

		/* Restore prev_wqe_size */
		qp->prev_wqe_size = wqe->prev_wqe_size;
		rc = -EINVAL;
		DP_ERR(dev, "POST SEND FAILED\n");
	}

	return rc;
}
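/*
 * Rollback note for the error path above: sq.db_data.data.value holds the
 * producer index as last published to hardware, so restoring the chain
 * producer from it (together with prev_wqe_size) discards every element
 * consumed for the failed WR without disturbing WRs already doorbelled.
 */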
int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	*bad_wr = NULL;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_send(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
	    (qp->state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->state != QED_ROCE_QP_STATE_SQD)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		DP_DEBUG(dev, QEDR_MSG_CQ,
			 "QP in wrong state! QP icid=0x%x state %d\n",
			 qp->icid, qp->state);
		return -EINVAL;
	}

	while (wr) {
		rc = __qedr_post_send(ibqp, wr, bad_wr);
		if (rc)
			break;

		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->sq);

		qp->sq.db_data.data.value++;

		wr = wr->next;
	}

	/* Trigger doorbell
	 * If there was a failure in the first WR then it will be triggered in
	 * vain. However this is not harmful (as long as the producer value is
	 * unchanged). For performance reasons we avoid checking for this
	 * redundant doorbell.
	 */
	wmb();
	writel(qp->sq.db_data.raw, qp->sq.db);

	/* Make sure write sticks */
	mmiowb();

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
}
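/*
 * Ordering note: the wmb() above ensures the WQEs written to the SQ are
 * globally visible before the doorbell write that makes them live, and
 * mmiowb() orders the doorbell MMIO with respect to the subsequent
 * spinlock release.
 */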
int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		   struct ib_recv_wr **bad_wr)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int status = 0;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if (qp->state == QED_ROCE_QP_STATE_RESET) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		int i;

		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
		    wr->num_sge > qp->rq.max_sges) {
			DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
			       qp->rq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		for (i = 0; i < wr->num_sge; i++) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			if (!i)
				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
					  wr->num_sge);

			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
				  wr->sg_list[i].lkey);

			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
				   wr->sg_list[i].length, flags);
		}

		/* Special case of no sges. FW requires between 1-4 sges...
		 * in this case we need to post 1 sge with length zero. this is
		 * because rdma write with immediate consumes an RQ.
		 */
		if (!wr->num_sge) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);

			RQ_SGE_SET(rqe, 0, 0, flags);
			i = 1;
		}

		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;

		qedr_inc_sw_prod(&qp->rq);

		/* Flush all the writes before signalling doorbell */
		wmb();

		qp->rq.db_data.data.value++;

		writel(qp->rq.db_data.raw, qp->rq.db);

		/* Make sure write sticks */
		mmiowb();

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
}
static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
		cq->pbl_toggle;
}

static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;
	struct qedr_qp *qp;

	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
						   resp_cqe->qp_handle.lo,
						   u64);
	return qp;
}

static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
}

/* Return latest CQE (needs processing) */
static union rdma_cqe *get_cqe(struct qedr_cq *cq)
{
	return cq->latest_cqe;
}
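/*
 * CQE validity uses the usual phase (toggle) bit scheme: the toggle
 * written by firmware flips each time the CQ wraps, and cq->pbl_toggle
 * tracks the phase the driver currently expects, so is_valid_cqe() only
 * accepts entries written since the last wrap the driver has observed;
 * stale entries from the previous lap are never treated as new.
 */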
/* For FMR work requests we must bump the completed counter used by the
 * PBL-recycling algorithm to decide whether a PBL can be freed.
 * We need to perform this whether the work request was signaled or not.
 * For this purpose we call this function from the condition that checks
 * if a WR should be skipped, to make sure we don't miss it (possibly this
 * FMR operation was not signaled).
 */
static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
{
	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
}
static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		       struct qedr_cq *cq, int num_entries,
		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
		       int force)
{
	u16 cnt = 0;

	while (num_entries && qp->sq.wqe_cons != hw_cons) {
		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
			qedr_chk_if_fmr(qp);
			/* skip WC */
			goto next_cqe;
		}

		/* fill WC */
		wc->status = status;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;

		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;

		switch (wc->opcode) {
		case IB_WC_RDMA_WRITE:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		case IB_WC_COMP_SWAP:
		case IB_WC_FETCH_ADD:
			wc->byte_len = 8;
			break;
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		default:
			break;
		}

		num_entries--;
		wc++;
		cnt++;
next_cqe:
		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
			qed_chain_consume(&qp->sq.pbl);
		qedr_inc_sw_cons(&qp->sq);
	}

	return cnt;
}
static int qedr_poll_cq_req(struct qedr_dev *dev,
			    struct qedr_qp *qp, struct qedr_cq *cq,
			    int num_entries, struct ib_wc *wc,
			    struct rdma_cqe_requester *req)
{
	int cnt = 0;

	switch (req->status) {
	case RDMA_CQE_REQ_STS_OK:
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_SUCCESS, 0);
		break;
	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
		if (qp->state != QED_ROCE_QP_STATE_ERR)
			DP_ERR(dev,
			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
			       cq->icid, qp->icid);
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_WR_FLUSH_ERR, 1);
		break;
	default:
		/* process all WQE before the consumer */
		qp->state = QED_ROCE_QP_STATE_ERR;
		cnt = process_req(dev, qp, cq, num_entries, wc,
				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
		wc += cnt;
		/* if we have extra WC fill it with actual error info */
		if (cnt < num_entries) {
			enum ib_wc_status wc_status;

			switch (req->status) {
			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_BAD_RESP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_LEN_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_QP_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_PROT_ERR;
				break;
			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_MW_BIND_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
				break;
			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RETRY_EXC_ERR;
				break;
			default:
				DP_ERR(dev,
				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_GENERAL_ERR;
			}
			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
					   wc_status, 1);
		}
	}

	return cnt;
}
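/*
 * Error-status strategy above: everything the requester completed before
 * the failing WQE (up to req->sq_cons - 1) is reported as SUCCESS, and one
 * extra work completion carrying the translated error status is emitted
 * for the failing WQE itself, provided the caller left room for it.
 */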
static inline int qedr_cqe_resp_status_to_ib(u8 status)
{
	switch (status) {
	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
		return IB_WC_LOC_ACCESS_ERR;
	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_MW_BIND_ERR;
	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_RD_REQ_ERR;
	case RDMA_CQE_RESP_STS_OK:
		return IB_WC_SUCCESS;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
					  struct ib_wc *wc)
{
	wc->status = IB_WC_SUCCESS;
	wc->byte_len = le32_to_cpu(resp->length);

	if (resp->flags & QEDR_RESP_IMM) {
		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
		wc->wc_flags |= IB_WC_WITH_IMM;

		if (resp->flags & QEDR_RESP_RDMA)
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;

		if (resp->flags & QEDR_RESP_INV)
			return -EINVAL;

	} else if (resp->flags & QEDR_RESP_INV) {
		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;

		if (resp->flags & QEDR_RESP_RDMA)
			return -EINVAL;

	} else if (resp->flags & QEDR_RESP_RDMA) {
		return -EINVAL;
	}

	return 0;
}
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			       struct qedr_cq *cq, struct ib_wc *wc,
			       struct rdma_cqe_responder *resp, u64 wr_id)
{
	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
	wc->opcode = IB_WC_RECV;
	wc->wc_flags = 0;

	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
		if (qedr_set_ok_cqe_resp_wc(resp, wc))
			DP_ERR(dev,
			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
			       cq, cq->icid, resp->flags);

	} else {
		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
		if (wc->status == IB_WC_GENERAL_ERR)
			DP_ERR(dev,
			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
			       cq, cq->icid, resp->status);
	}

	/* Fill the rest of the WC */
	wc->vendor_err = 0;
	wc->src_qp = qp->id;
	wc->qp = &qp->ibqp;
	wc->wr_id = wr_id;
}
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			    struct qedr_cq *cq, struct ib_wc *wc,
			    struct rdma_cqe_responder *resp)
{
	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;

	__process_resp_one(dev, qp, cq, wc, resp, wr_id);

	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
		qed_chain_consume(&qp->rq.pbl);
	qedr_inc_sw_cons(&qp->rq);

	return 1;
}
static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
			      int num_entries, struct ib_wc *wc, u16 hw_cons)
{
	u16 cnt = 0;

	while (num_entries && qp->rq.wqe_cons != hw_cons) {
		/* fill WC */
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->byte_len = 0;
		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc->qp = &qp->ibqp;
		num_entries--;
		wc++;
		cnt++;
		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
			qed_chain_consume(&qp->rq.pbl);
		qedr_inc_sw_cons(&qp->rq);
	}

	return cnt;
}
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				 struct rdma_cqe_responder *resp, int *update)
{
	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
			     struct qedr_cq *cq, int num_entries,
			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
			     int *update)
{
	int cnt;

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		cnt = process_resp_flush(qp, cq, num_entries, wc,
					 resp->rq_cons);
		try_consume_resp_cqe(cq, qp, resp, update);
	} else {
		cnt = process_resp_one(dev, qp, cq, wc, resp);
		consume_cqe(cq);
		*update |= 1;
	}

	return cnt;
}
static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				struct rdma_cqe_requester *req, int *update)
{
	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	union rdma_cqe *cqe = cq->latest_cqe;
	u32 old_cons, new_cons;
	unsigned long flags;
	int update = 0;
	int done = 0;

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return 0;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return qedr_gsi_poll_cq(ibcq, num_entries, wc);

	spin_lock_irqsave(&cq->cq_lock, flags);
	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	while (num_entries && is_valid_cqe(cq, cqe)) {
		struct qedr_qp *qp;
		int cnt = 0;

		/* prevent speculative reads of any field of CQE */
		rmb();

		qp = cqe_get_qp(cqe);
		if (!qp) {
			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
			break;
		}

		wc->qp = &qp->ibqp;

		switch (cqe_get_type(cqe)) {
		case RDMA_CQE_TYPE_REQUESTER:
			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
					       &cqe->req);
			try_consume_req_cqe(cq, qp, &cqe->req, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_RQ:
			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
						&cqe->resp, &update);
			break;
		case RDMA_CQE_TYPE_INVALID:
		default:
			DP_ERR(dev, "Error: invalid CQE type = %d\n",
			       cqe_get_type(cqe));
		}
		num_entries -= cnt;
		wc += cnt;
		done += cnt;

		cqe = get_cqe(cq);
	}
	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

	cq->cq_cons += new_cons - old_cons;

	if (update)
		/* doorbell notifies about the latest VALID entry,
		 * but the chain already points to the next INVALID one
		 */
		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return done;
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
		     u8 port_num,
		     const struct ib_wc *in_wc,
		     const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *mad_hdr,
		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
		 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
		 mad_hdr->class_specific, mad_hdr->class_version,
		 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
	return IB_MAD_RESULT_SUCCESS;
}
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
			struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;