/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"

/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 **/
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->ldev->pcidev->vendor;
	props->vendor_part_id = iwdev->ldev->pcidev->device;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp;
	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr;
	props->max_pd = iwdev->max_pd;
	props->max_sge_rd = I40IW_MAX_SGE_RD;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
	return 0;
}

/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 **/
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	memset(props, 0, sizeof(*props));

	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	props->lid = 1;
	if (netif_carrier_ok(iwdev->netdev))
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	return 0;
}

/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 **/
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp;
	struct i40iw_ucontext *ucontext;

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return ERR_PTR(-EINVAL);

	if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
		i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
			     req.userspace_ver, I40IW_ABI_USERSPACE_VER);
		return ERR_PTR(-EINVAL);
	}

	memset(&uresp, 0, sizeof(uresp));
	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = I40IW_ABI_KERNEL_VER;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		kfree(ucontext);
		return ERR_PTR(-EFAULT);
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return &ucontext->ibucontext;
}

/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 **/
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct i40iw_ucontext *ucontext = to_ucontext(context);
	unsigned long flags;

	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->cq_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->qp_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

	kfree(ucontext);
	return 0;
}

/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 **/
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset;
	u64 push_offset;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
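
	/*
	 * The page at db_addr_offset is the doorbell page and must be
	 * mapped uncached; pages past push_offset are push pages, which
	 * alternate by parity between an uncached page and a
	 * write-combined page.
	 */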
	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_private_data = ucontext;
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 **/
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	atomic_inc(&cqp_request->refcount);
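
	/*
	 * The extra reference taken above keeps the request alive until
	 * compl_info.op_ret_val is read below; it is dropped by the
	 * i40iw_put_cqp_request() at the end of this function.
	 */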
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 **/
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}

/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 **/
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct i40iw_pd *iwpd;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return ERR_PTR(err);
	}

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);

	if (context) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	}

	i40iw_add_pdusecount(iwpd);
	return &iwpd->ibpd;
error:
	kfree(iwpd);
free_res:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return ERR_PTR(err);
}

/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 **/
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
	return 0;
}

/**
 * i40iw_qp_roundup - return round up qp ring size
 * @wr_ring_size: ring size to round up
 **/
static int i40iw_qp_roundup(u32 wr_ring_size)
{
	int scount = 1;

	if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
		wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
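
	/*
	 * Round up to the next power of two: decrement, smear the highest
	 * set bit into every lower position (shifts of 1, 2, 4, 8 and 16
	 * cover all 32 bits), then increment back.
	 */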
	for (wr_ring_size--; scount <= 16; scount *= 2)
		wr_ring_size |= wr_ring_size >> scount;
	return ++wr_ring_size;
}

/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 **/
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}

/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 **/
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
}

/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 **/
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}

/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 **/
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}

/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 **/
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}

/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 **/
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u32 sq_size, rq_size;
	u8 sqshift, rqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
	rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

	status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
	if (!status)
		status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);

	if (status)
		return -ENOSYS;

	sqdepth = sq_size << sqshift;
	rqdepth = rq_size << rqshift;

	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
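
	/*
	 * One DMA allocation backs the whole QP: sqdepth SQ WQEs, followed
	 * by rqdepth RQ WQEs, followed by the shadow area.
	 */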
	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sq_size;
	ukinfo->rq_size = rq_size;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}

/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 **/
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext;
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	void *mem;
	enum i40iw_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;

	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->allocated_buffer = mem;
	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -ENOSYS;
		goto error;
	}
	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);
	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_data\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		if (ibpd->uobject && ibpd->uobject->context) {
			iwqp->user_mode = 1;
			ucontext = to_ucontext(ibpd->uobject->context);

			if (req.user_wqe_buffers) {
				spin_lock_irqsave(
				    &ucontext->qp_reg_mem_list_lock, flags);
				iwqp->iwpbl = i40iw_get_pbl(
				    (unsigned long)req.user_wqe_buffers,
				    &ucontext->qp_reg_mem_list);
				spin_unlock_irqrestore(
				    &ucontext->qp_reg_mem_list_lock, flags);

				if (!iwqp->iwpbl) {
					err_code = -ENODATA;
					i40iw_pr_err("no pbl info\n");
					goto error;
				}
			}
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->fast_reg_en = true;
		iwarp_info->priv_mode_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	if (ibpd->uobject && udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}

	init_completion(&iwqp->sq_drained);
	init_completion(&iwqp->rq_drained);

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	return ERR_PTR(err_code);
}

/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 **/
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_recv_sge = 1;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	return 0;
}

/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 **/
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Modify QP fail");
}

/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 **/
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	int err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				del_timer(&iwqp->terminate_timer);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;

		if (issue_modify_qp)
			iwqp->iwarp_state = info.next_iwarp_state;
		else
			info.next_iwarp_state = iwqp->iwarp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp)
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}

/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 **/
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 **/
static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;

	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}

/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 **/
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	if (!ib_cq) {
		i40iw_pr_err("ib_cq == NULL\n");
		return 0;
	}

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	kfree(iwcq);
	return 0;
}

/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 **/
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq;
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (entries > iwdev->max_cqe)
		return ERR_PTR(-EINVAL);

	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
	if (!iwcq)
		return ERR_PTR(-ENOMEM);

	memset(&info, 0, sizeof(info));

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		goto error;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	info.dev = dev;
	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	info.ceqe_mask = 0;
	info.ceq_id = 0;
	info.ceq_id_valid = true;
	info.ceqe_mask = 1;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (context) {
		struct i40iw_ucontext *ucontext;
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		ucontext = to_ucontext(context);
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
			goto cq_free_resources;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Create CQ fail");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (context) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	return (struct ib_cq *)iwcq;

cq_destroy:
	cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
error:
	kfree(iwcq);
	return ERR_PTR(err_code);
}

/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 **/
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}

/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 **/
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
}

/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 **/
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;
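
	/*
	 * A stag is composed of the allocated index shifted into the bits
	 * covered by mr_stagmask, random driver-key bits above the mask,
	 * and a random 8-bit consumer key in the low byte.
	 */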
	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
	}
	return stag;
}

/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @palloc: Pointer to allocated pbles
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index of pble in pinfo
 **/
static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
				       u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
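	/*
	 * Stay within the current chunk until *idx reaches its count, then
	 * hop to the next level-2 leaf; with a level-1 (or level-0)
	 * allocation pinfo is NULL and we only ever advance the pointer.
	 */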
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}

/**
 * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pbl pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 **/
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	int chunk_pages, entry, pg_shift, i;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct scatterlist *sg;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
	pg_shift = ffs(region->page_size) - 1;
	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
		chunk_pages = sg_dma_len(sg) >> pg_shift;
		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
		    !iwpbl->qp_mr.sq_page)
			iwpbl->qp_mr.sq_page = sg_page(sg);
		for (i = 0; i < chunk_pages; i++) {
			*pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
			pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
		}
	}
}

/**
 * i40iw_setup_pbles - copy user pg addresses to pbles
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag if to use pbles or memory (level 0)
 **/
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
			     struct i40iw_mr *iwmr,
			     bool use_pbles)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	u64 *pbl;
	enum i40iw_status_code status;
	enum i40iw_pble_level level = I40IW_LEVEL_1;
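
	/*
	 * Level 0 stores the page addresses directly in iwmr->pgaddrmem
	 * and is only usable for small registrations; anything larger
	 * must take a pble allocation from the shared pool.
	 */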
	if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
		return -ENOMEM;

	if (use_pbles) {
		mutex_lock(&iwdev->pbl_mutex);
		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
		mutex_unlock(&iwdev->pbl_mutex);
		if (status)
			return -ENOMEM;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
		pbl = (u64 *)pinfo->addr;
	} else {
		pbl = iwmr->pgaddrmem;
	}

	i40iw_copy_user_pgaddrs(iwmr, pbl, level);
	return 0;
}

/**
 * i40iw_handle_q_mem - handle memory for qp and cq
 * @iwdev: iwarp device
 * @req: information for q memory management
 * @iwpbl: pble struct
 * @use_pbles: flag to use pble
 **/
static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
			      struct i40iw_mem_reg_req *req,
			      struct i40iw_pbl *iwpbl,
			      bool use_pbles)
{
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_mr *iwmr = iwpbl->iwmr;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
	struct i40iw_hmc_pble *hmc_p;
	u64 *arr = iwmr->pgaddrmem;
	int err = 0;
	int total;

	total = req->sq_pages + req->rq_pages + req->cq_pages;
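
	/*
	 * The user buffer lays out SQ pages, then RQ pages, then CQ pages;
	 * the page after all of them (arr[total]) holds the shadow area.
	 */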
	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
	if (err)
		return err;
	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
		return -ENOSYS;
	}

	if (use_pbles)
		arr = (u64 *)palloc->level1.addr;
	if (req->reg_type == IW_MEMREG_TYPE_QP) {
		hmc_p = &qpmr->sq_pbl;
		qpmr->shadow = (dma_addr_t)arr[total];
		if (use_pbles) {
			hmc_p->idx = palloc->level1.idx;
			hmc_p = &qpmr->rq_pbl;
			hmc_p->idx = palloc->level1.idx + req->sq_pages;
		} else {
			hmc_p->addr = arr[0];
			hmc_p = &qpmr->rq_pbl;
			hmc_p->addr = arr[1];
		}
	} else {		/* CQ */
		hmc_p = &cqmr->cq_pbl;
		cqmr->shadow = (dma_addr_t)arr[total];
		if (use_pbles)
			hmc_p->idx = palloc->level1.idx;
		else
			hmc_p->addr = arr[0];
	}
	return err;
}

/**
 * i40iw_hw_alloc_stag - cqp command to allocate stag
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 **/
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
	struct i40iw_allocate_stag_info *info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.alloc_stag.info;
	memset(info, 0, sizeof(*info));
	info->page_size = PAGE_SIZE;
	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->total_len = iwmr->length;
	info->remote_access = true;
	cqp_info->cqp_cmd = OP_ALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}

/**
 * i40iw_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory for stag registration
 * @max_num_sg: max number of pages
 **/
static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
				    enum ib_mr_type mr_type,
				    u32 max_num_sg)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	int err_code = -ENOMEM;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		err_code = -EOVERFLOW;
		goto err;
	}
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	mutex_lock(&iwdev->pbl_mutex);
	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
	mutex_unlock(&iwdev->pbl_mutex);
	if (status)
		goto err1;

	if (palloc->level != I40IW_LEVEL_1)
		goto err2;
	err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err2;
	iwpbl->pbl_allocated = true;
	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
err2:
	i40iw_free_pble(iwdev->pble_rsrc, palloc);
err1:
	i40iw_free_stag(iwdev, stag);
err:
	kfree(iwmr);
	return ERR_PTR(err_code);
}

/**
 * i40iw_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 **/
static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	u64 *pbl;

	if (unlikely(iwmr->npages == iwmr->page_cnt))
		return -ENOMEM;

	pbl = (u64 *)palloc->level1.addr;
	pbl[iwmr->npages++] = cpu_to_le64(addr);
	return 0;
}

/**
 * i40iw_map_mr_sg - map of sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list for fmr
 * @sg_nents: number of sg pages
 **/
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			   int sg_nents, unsigned int *sg_offset)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);

	iwmr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
}

/**
 * i40iw_drain_sq - drain the send queue
 * @ibqp: ib qp pointer
 **/
static void i40iw_drain_sq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
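
	/*
	 * Block until i40iw_poll_cq() signals sq_drained, which it does
	 * once the SQ ring empties after the QP has left RTS.
	 */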
	if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
		wait_for_completion(&iwqp->sq_drained);
}

/**
 * i40iw_drain_rq - drain the receive queue
 * @ibqp: ib qp pointer
 **/
static void i40iw_drain_rq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
		wait_for_completion(&iwqp->rq_drained);
}

/**
 * i40iw_hwreg_mr - send cqp command for memory registration
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 * @access: access for MR
 **/
static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
			  struct i40iw_mr *iwmr,
			  u16 access)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_reg_ns_stag_info *stag_info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
	memset(stag_info, 0, sizeof(*stag_info));
	stag_info->va = (void *)(unsigned long)iwpbl->user_base;
	stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	stag_info->stag_key = (u8)iwmr->stag;
	stag_info->total_len = iwmr->length;
	stag_info->access_rights = access;
	stag_info->pd_id = iwpd->sc_pd.pd_id;
	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
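
	/*
	 * Single-page registrations program the physical address directly;
	 * multi-page registrations point the stag at the pble pool, either
	 * a level-1 leaf or a level-2 root.
	 */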
	if (iwmr->page_cnt > 1) {
		if (palloc->level == I40IW_LEVEL_1) {
			stag_info->first_pm_pbl_index = palloc->level1.idx;
			stag_info->chunk_size = 1;
		} else {
			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
			stag_info->chunk_size = 3;
		}
	} else {
		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
	}

	cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
	cqp_info->post_sq = 1;
	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}

/**
 * i40iw_reg_user_mr - Register a user memory region
 * @pd: ibpd pointer
 * @start: virtual start address
 * @length: length of mr
 * @virt: virtual address
 * @acc: access of mr
 * @udata: user data
 **/
static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
				       u64 start,
				       u64 length,
				       u64 virt,
				       int acc,
				       struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_ucontext *ucontext;
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	struct ib_umem *region;
	struct i40iw_mem_reg_req req;
	u64 pbl_depth = 0;
	u32 stag = 0;
	u16 access;
	u64 region_length;
	bool use_pbles = false;
	unsigned long flags;
	int err = -ENOSYS;

	if (length > I40IW_MAX_MR_SIZE)
		return ERR_PTR(-EINVAL);
	region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(region))
		return (struct ib_mr *)region;

	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
		ib_umem_release(region);
		return ERR_PTR(-EFAULT);
	}

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr) {
		ib_umem_release(region);
		return ERR_PTR(-ENOMEM);
	}

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	ucontext = to_ucontext(pd->uobject->context);
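	/* pages needed to cover the region, including the sub-page offset of start (4K units) */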
	region_length = region->length + (start & 0xfff);
	pbl_depth = region_length >> 12;
	pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
	iwmr->length = region->length;

	iwpbl->user_base = virt;
	palloc = &iwpbl->pble_alloc;

	iwmr->type = req.reg_type;
	iwmr->page_cnt = (u32)pbl_depth;

	switch (req.reg_type) {
	case IW_MEMREG_TYPE_QP:
		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_CQ:
		use_pbles = (req.cq_pages > 1);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_MEM:
		access = I40IW_ACCESS_FLAGS_LOCALREAD;

		use_pbles = (iwmr->page_cnt != 1);
		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
		if (err)
			goto error;

		access |= i40iw_get_user_access(acc);
		stag = i40iw_create_stag(iwdev);
		if (!stag) {
			err = -ENOMEM;
			goto error;
		}

		iwmr->stag = stag;
		iwmr->ibmr.rkey = stag;
		iwmr->ibmr.lkey = stag;

		err = i40iw_hwreg_mr(iwdev, iwmr, access);
		if (err) {
			i40iw_free_stag(iwdev, stag);
			goto error;
		}
		break;
	default:
		goto error;
	}

	iwmr->type = req.reg_type;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;

error:
	if (palloc->level != I40IW_LEVEL_0)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	ib_umem_release(region);
	kfree(iwmr);
	return ERR_PTR(err);
}

/**
 * i40iw_reg_phys_mr - register kernel physical memory
 * @pd: ibpd pointer
 * @addr: physical address of memory to register
 * @size: size of memory to register
 * @acc: Access rights
 * @iova_start: start of virtual address for physical buffers
 **/
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
				u64 addr,
				u64 size,
				int acc,
				u64 *iova_start)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
	int ret;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	iwpbl->user_base = *iova_start;
	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		ret = -EOVERFLOW;
		goto err;
	}
	access |= i40iw_get_user_access(acc);
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->page_cnt = 1;
	iwmr->pgaddrmem[0] = addr;
	iwmr->length = size;
	status = i40iw_hwreg_mr(iwdev, iwmr, access);
	if (status) {
		i40iw_free_stag(iwdev, stag);
		ret = -ENOMEM;
		goto err;
	}

	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
err:
	kfree(iwmr);
	return ERR_PTR(ret);
}

/**
 * i40iw_get_dma_mr - register physical mem
 * @pd: ibpd ptr
 * @acc: access for memory
 **/
static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	u64 kva = 0;

	return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
}

/**
 * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
 * @iwmr: iwmr for IB's user page addresses
 * @ucontext: ptr to user context
 **/
static void i40iw_del_memlist(struct i40iw_mr *iwmr,
			      struct i40iw_ucontext *ucontext)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	unsigned long flags;

	switch (iwmr->type) {
	case IW_MEMREG_TYPE_CQ:
		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		if (!list_empty(&ucontext->cq_reg_mem_list))
			list_del(&iwpbl->list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_QP:
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		if (!list_empty(&ucontext->qp_reg_mem_list))
			list_del(&iwpbl->list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	default:
		break;
	}
}

/**
 * i40iw_dereg_mr - deregister mr
 * @ib_mr: mr ptr for dereg
 **/
static int i40iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct ib_pd *ibpd = ib_mr->pd;
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_mr *iwmr = to_iwmr(ib_mr);
	struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
	enum i40iw_status_code status;
	struct i40iw_dealloc_stag_info *info;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	u32 stag_idx;

	if (iwmr->region)
		ib_umem_release(iwmr->region);

	if (iwmr->type != IW_MEMREG_TYPE_MEM) {
		if (ibpd->uobject) {
			struct i40iw_ucontext *ucontext;

			ucontext = to_ucontext(ibpd->uobject->context);
			i40iw_del_memlist(iwmr, ucontext);
		}
		if (iwpbl->pbl_allocated)
			i40iw_free_pble(iwdev->pble_rsrc, palloc);
		kfree(iwmr);
		return 0;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.dealloc_stag.info;
	memset(info, 0, sizeof(*info));

	info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
	info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
	stag_idx = info->stag_idx;
	info->mr = true;
	if (iwpbl->pbl_allocated)
		info->dealloc_pbl = true;

	cqp_info->cqp_cmd = OP_DEALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
	i40iw_rem_pdusecount(iwpd, iwdev);
	i40iw_free_stag(iwdev, iwmr->stag);
	if (iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	kfree(iwmr);
	return 0;
}

/**
 * i40iw_show_rev
 **/
static ssize_t i40iw_show_rev(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct i40iw_ib_device *iwibdev = container_of(dev,
						       struct i40iw_ib_device,
						       ibdev.dev);
	u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;

	return sprintf(buf, "%x\n", hw_rev);
}

/**
 * i40iw_show_hca
 **/
static ssize_t i40iw_show_hca(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "I40IW\n");
}

/**
 * i40iw_show_board
 **/
static ssize_t i40iw_show_board(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);

static struct device_attribute *i40iw_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

/**
 * i40iw_copy_sg_list - copy sg list for qp
 * @sg_list: copied into sg_list
 * @sgl: copy from sgl
 * @num_sges: count of sg entries
 **/
static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
{
	unsigned int i;

	for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
		sg_list[i].tag_off = sgl[i].addr;
		sg_list[i].len = sgl[i].length;
		sg_list[i].stag = sgl[i].lkey;
	}
}

/**
 * i40iw_post_send - kernel application wr
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
 * @bad_wr: return of bad wr if err
 **/
static int i40iw_post_send(struct ib_qp *ibqp,
			   struct ib_send_wr *ib_wr,
			   struct ib_send_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_sq_info info;
	enum i40iw_status_code ret;
	int err = 0;
	unsigned long flags;
	bool inv_stag;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		inv_stag = false;
		memset(&info, 0, sizeof(info));
		info.wr_id = (u64)(ib_wr->wr_id);
		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
			info.signaled = true;
		if (ib_wr->send_flags & IB_SEND_FENCE)
			info.read_fence = true;

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			/* fall-through */
		case IB_WR_SEND_WITH_INV:
			if (ib_wr->opcode == IB_WR_SEND) {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL;
				else
					info.op_type = I40IW_OP_TYPE_SEND;
			} else {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
				else
					info.op_type = I40IW_OP_TYPE_SEND_INV;
			}

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_send.len = ib_wr->sg_list[0].length;
				ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			} else {
				info.op.send.num_sges = ib_wr->num_sge;
				info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_WRITE:
			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
				info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
				ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
			} else {
				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
				info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			inv_stag = true;
			/* fall-through */
		case IB_WR_RDMA_READ:
			if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
				err = -EINVAL;
				break;
			}
			info.op_type = I40IW_OP_TYPE_RDMA_READ;
			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
			info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
			info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_LOCAL_INV:
			info.op_type = I40IW_OP_TYPE_INV_STAG;
			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
			if (ret)
				err = -EINVAL;
			break;
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;

			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
			info.stag_idx = reg_wr(ib_wr)->key >> 8;
			info.wr_id = ib_wr->wr_id;

			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
			info.total_len = iwmr->ibmr.length;
			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
			info.first_pm_pbl_index = palloc->level1.idx;
			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;

			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
				info.chunk_size = 1;

			if (page_shift == 21)
				info.page_size = 1; /* 2M page */

			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
			if (ret)
				err = -EINVAL;
			break;
		}
		default:
			err = -EINVAL;
			i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
				     ib_wr->opcode);
			break;
		}

		if (err)
			break;
		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	else
		ukqp->ops.iw_qp_post_wr(ukqp);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return err;
}

/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 **/
static int i40iw_post_recv(struct ib_qp *ibqp,
			   struct ib_recv_wr *ib_wr,
			   struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err(" post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}
out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}

/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 **/
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
2311 if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
2312 if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
2313 complete(&iwqp->sq_drained);
2314 if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
2315 complete(&iwqp->rq_drained);
2317 entry->byte_len = cq_poll_info.bytes_xfered;
2321 spin_unlock_irqrestore(&iwcq->lock, flags);
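/*
 * Note: once the QP has left RTS, the loop above completes
 * iwqp->sq_drained/rq_drained when the corresponding ring is empty;
 * this is what i40iw_drain_sq()/i40iw_drain_rq() block on.
 *
 * Illustrative consumer-side polling loop ("cq" and handle() are
 * hypothetical):
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle(&wc);
 */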
/**
 * i40iw_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return 0;
}
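/*
 * Illustrative arm-then-poll pattern for the hook above.  This driver
 * always returns 0 here, i.e. it does not report missed events via
 * IB_CQ_REPORT_MISSED_EVENTS, so a consumer must re-poll after arming
 * to close the race window ("cq" and handle() are hypothetical):
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle(&wc);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle(&wc);
 */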
/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port return
 */
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = i40iw_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}
static const char * const i40iw_hw_stat_names[] = {
	/* 32bit names */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64bit names */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};
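/*
 * The name table above is flat: 32-bit HW counters occupy indices
 * [0, I40IW_HW_STAT_INDEX_MAX_32) and the 64-bit counters follow at an
 * offset of I40IW_HW_STAT_INDEX_MAX_32.  For example, the slot for
 * "ip4InOctets" is:
 *
 *	I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32
 *
 * which matches the layout of struct i40iw_dev_hw_stats copied out in
 * i40iw_get_hw_stats() below.
 */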
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
				 size_t str_len)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, str_len, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}
/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
			   I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 +
		      I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
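/*
 * The counters registered here surface through sysfs via the rdma core,
 * e.g. (device name illustrative):
 *
 *	/sys/class/infiniband/i40iw0/ports/1/hw_counters/ip4InOctets
 *
 * The lifespan controls how long a cached value may be served before
 * i40iw_get_hw_stats() is invoked again; RDMA_HW_STATS_DEFAULT_LIFESPAN
 * is 10 ms in the rdma core.
 */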
/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	unsigned long flags;

	if (dev->is_pf) {
		spin_lock_irqsave(&devstat->stats_lock, flags);
		devstat->ops.iw_hw_stat_read_all(devstat,
						 &devstat->hw_stats);
		spin_unlock_irqrestore(&devstat->stats_lock, flags);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	/* hw_stats is already a pointer; copy the stats themselves,
	 * not the address of the pointer
	 */
	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
static int i40iw_query_gid(struct ib_device *ibdev,
			   u8 port,
			   int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	/* iWARP has no real GIDs; report the netdev MAC address */
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}
/**
 * i40iw_modify_port - Modify port properties
 * @ibdev: device pointer from stack
 * @port: port number
 * @port_modify_mask: mask for port modifications
 * @props: port properties
 */
static int i40iw_modify_port(struct ib_device *ibdev,
			     u8 port,
			     int port_modify_mask,
			     struct ib_port_modify *props)
{
	return -ENOSYS;
}
/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int i40iw_query_pkey(struct ib_device *ibdev,
			    u8 port,
			    u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}
/**
 * i40iw_create_ah - create address handle
 * @ibpd: ptr of pd
 * @attr: address handle attributes
 */
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
				     struct ib_ah_attr *attr)
{
	return ERR_PTR(-ENOSYS);
}
/**
 * i40iw_destroy_ah - Destroy address handle
 * @ah: pointer to address handle
 */
static int i40iw_destroy_ah(struct ib_ah *ah)
{
	return -ENOSYS;
}
/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
	if (!iwibdev) {
		i40iw_pr_err("iwibdev == NULL\n");
		return NULL;
	}
	strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
	iwibdev->ibdev.owner = THIS_MODULE;
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = 1;
	iwibdev->ibdev.dma_device = &pcidev->dev;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	iwibdev->ibdev.query_port = i40iw_query_port;
	iwibdev->ibdev.modify_port = i40iw_modify_port;
	iwibdev->ibdev.query_pkey = i40iw_query_pkey;
	iwibdev->ibdev.query_gid = i40iw_query_gid;
	iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
	iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
	iwibdev->ibdev.mmap = i40iw_mmap;
	iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
	iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
	iwibdev->ibdev.create_qp = i40iw_create_qp;
	iwibdev->ibdev.modify_qp = i40iw_modify_qp;
	iwibdev->ibdev.query_qp = i40iw_query_qp;
	iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
	iwibdev->ibdev.create_cq = i40iw_create_cq;
	iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
	iwibdev->ibdev.query_device = i40iw_query_device;
	iwibdev->ibdev.create_ah = i40iw_create_ah;
	iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
	iwibdev->ibdev.drain_sq = i40iw_drain_sq;
	iwibdev->ibdev.drain_rq = i40iw_drain_rq;
	iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
	iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
	if (!iwibdev->ibdev.iwcm) {
		ib_dealloc_device(&iwibdev->ibdev);
		i40iw_pr_err("iwcm == NULL\n");
		return NULL;
	}

	iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
	iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
	iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
	iwibdev->ibdev.iwcm->connect = i40iw_connect;
	iwibdev->ibdev.iwcm->accept = i40iw_accept;
	iwibdev->ibdev.iwcm->reject = i40iw_reject;
	iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
	iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
	memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iwcm->ifname));
	iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
	iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
	iwibdev->ibdev.poll_cq = i40iw_poll_cq;
	iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
	iwibdev->ibdev.post_send = i40iw_post_send;
	iwibdev->ibdev.post_recv = i40iw_post_recv;

	return iwibdev;
}
/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}
/**
 * i40iw_unregister_rdma_device - unregister of iwarp from IB
 * @iwibdev: rdma device ptr
 */
static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
		device_remove_file(&iwibdev->ibdev.dev,
				   i40iw_dev_attributes[i]);
	ib_unregister_device(&iwibdev->ibdev);
}
/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	if (!iwibdev)
		return;

	i40iw_unregister_rdma_device(iwibdev);
	kfree(iwibdev->ibdev.iwcm);
	iwibdev->ibdev.iwcm = NULL;
	ib_dealloc_device(&iwibdev->ibdev);
}
/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int i, ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;

	ret = ib_register_device(&iwibdev->ibdev, NULL);
	if (ret)
		goto error;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
		ret = device_create_file(&iwibdev->ibdev.dev,
					 i40iw_dev_attributes[i]);
		if (ret) {
			/* unwind any attribute files already created */
			while (i > 0) {
				i--;
				device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
			}
			ib_unregister_device(&iwibdev->ibdev);
			goto error;
		}
	}
	return 0;
error:
	kfree(iwdev->iwibdev->ibdev.iwcm);
	iwdev->iwibdev->ibdev.iwcm = NULL;
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}