X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=drivers%2Fstaging%2Frdma%2Fhfi1%2Fverbs.c;h=9beb0aa876f073c8eaad7fd119118b55b77dcf6c;hb=25912ee10d64eba7f267b475afcf5d6ad4249ab3;hp=41bb59eb001c72fe214a4114047fdf03156f9007;hpb=cb1a14b95eee86d7e20bfa2c010aab77b021f62e;p=karo-tx-linux.git

diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 41bb59eb001c..9beb0aa876f0 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -129,6 +129,9 @@ static void verbs_sdma_complete(
 	int status,
 	int drained);
 
+/* Length of buffer to create verbs txreq cache name */
+#define TXREQ_NAME_LEN 24
+
 /*
  * Note that it is OK to post send work requests in the SQE and ERR
  * states; hfi1_do_send() will process them and generate error
@@ -380,9 +383,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
 	 * undefined operations.
 	 * Make sure buffer is large enough to hold the result for atomics.
 	 */
-	if (wr->opcode == IB_WR_FAST_REG_MR) {
-		return -EINVAL;
-	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
+	if (qp->ibqp.qp_type == IB_QPT_UC) {
 		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
 			return -EINVAL;
 	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
@@ -391,7 +392,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
 		    wr->opcode != IB_WR_SEND_WITH_IMM)
 			return -EINVAL;
 		/* Check UD destination address PD */
-		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
+		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
 			return -EINVAL;
 	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
 		return -EINVAL;
@@ -412,7 +413,21 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
 	rkt = &to_idev(qp->ibqp.device)->lk_table;
 	pd = to_ipd(qp->ibqp.pd);
 	wqe = get_swqe_ptr(qp, qp->s_head);
-	wqe->wr = *wr;
+
+
+	if (qp->ibqp.qp_type != IB_QPT_UC &&
+	    qp->ibqp.qp_type != IB_QPT_RC)
+		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
+	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+		 wr->opcode == IB_WR_RDMA_WRITE ||
+		 wr->opcode == IB_WR_RDMA_READ)
+		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
+	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
+	else
+		memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+
 	wqe->length = 0;
 	j = 0;
 	if (wr->num_sge) {
@@ -438,7 +453,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
 		if (wqe->length > 0x80000000U)
 			goto bail_inval_free;
 	} else {
-		struct hfi1_ah *ah = to_iah(wr->wr.ud.ah);
+		struct hfi1_ah *ah = to_iah(ud_wr(wr)->ah);
 
 		atomic_inc(&ah->refcount);
 	}
@@ -597,6 +612,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
 	u32 tlen = packet->tlen;
 	struct hfi1_pportdata *ppd = rcd->ppd;
 	struct hfi1_ibport *ibp = &ppd->ibport_data;
+	unsigned long flags;
 	u32 qp_num;
 	int lnh;
 	u8 opcode;
@@ -639,10 +655,10 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
 			goto drop;
 		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
 			packet->qp = p->qp;
-			spin_lock(&packet->qp->r_lock);
+			spin_lock_irqsave(&packet->qp->r_lock, flags);
 			if (likely((qp_ok(opcode, packet))))
 				opcode_handler_tbl[opcode](packet);
-			spin_unlock(&packet->qp->r_lock);
+			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
 		}
 		/*
 		 * Notify hfi1_multicast_detach() if it is waiting for us
@@ -657,10 +673,10 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
 			rcu_read_unlock();
 			goto drop;
 		}
-		spin_lock(&packet->qp->r_lock);
+		spin_lock_irqsave(&packet->qp->r_lock, flags);
 		if (likely((qp_ok(opcode, packet))))
 			opcode_handler_tbl[opcode](packet);
-		spin_unlock(&packet->qp->r_lock);
+		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
 		rcu_read_unlock();
 	}
 	return;
@@ -1199,6 +1215,7 @@ pio_bail:
 	}
 	return 0;
 }
+
 /*
  * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
  * being an entry from the ingress partition key table), return 0
@@ -1884,7 +1901,7 @@ static void init_ibport(struct hfi1_pportdata *ppd)
 
 static void verbs_txreq_kmem_cache_ctor(void *obj)
 {
-	struct verbs_txreq *tx = (struct verbs_txreq *)obj;
+	struct verbs_txreq *tx = obj;
 
 	memset(tx, 0, sizeof(*tx));
 }
@@ -1903,6 +1920,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	int ret;
 	size_t lcpysz = IB_DEVICE_NAME_MAX;
 	u16 descq_cnt;
+	char buf[TXREQ_NAME_LEN];
 
 	ret = hfi1_qp_init(dev);
 	if (ret)
@@ -1956,8 +1974,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 
 	descq_cnt = sdma_get_descq_cnt();
 
+	snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
 	/* SLAB_HWCACHE_ALIGN for AHG */
-	dev->verbs_txreq_cache = kmem_cache_create("hfi1_vtxreq_cache",
+	dev->verbs_txreq_cache = kmem_cache_create(buf,
 						   sizeof(struct verbs_txreq),
 						   0, SLAB_HWCACHE_ALIGN,
 						   verbs_txreq_kmem_cache_ctor);
@@ -2048,8 +2067,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	ibdev->reg_user_mr = hfi1_reg_user_mr;
 	ibdev->dereg_mr = hfi1_dereg_mr;
 	ibdev->alloc_mr = hfi1_alloc_mr;
-	ibdev->alloc_fast_reg_page_list = hfi1_alloc_fast_reg_page_list;
-	ibdev->free_fast_reg_page_list = hfi1_free_fast_reg_page_list;
 	ibdev->alloc_fmr = hfi1_alloc_fmr;
 	ibdev->map_phys_fmr = hfi1_map_phys_fmr;
 	ibdev->unmap_fmr = hfi1_unmap_fmr;
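
Note on the post_one_send() hunks: the single "wqe->wr = *wr" copy becomes a
per-opcode copy because the verbs core split struct ib_send_wr into per-type
containers (ud_wr(), rdma_wr(), atomic_wr() accessors), and the same rework
is why the IB_WR_FAST_REG_MR check and the alloc/free_fast_reg_page_list
hooks disappear. Below is a minimal user-space sketch of that container
pattern; the struct and function names are illustrative stand-ins, not the
kernel's own.

/*
 * Each extended request type embeds the base struct as its first member,
 * an accessor converts a base pointer back to the container, and the
 * consumer copies only sizeof() the variant it actually needs.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct send_wr {                        /* base, like ib_send_wr */
	int opcode;
};

struct my_ud_wr {                       /* extended, like ib_ud_wr */
	struct send_wr wr;              /* base must come first */
	int remote_qpn;
};

/* plays the role of the ud_wr() accessor used in the patch */
static struct my_ud_wr *to_ud_wr(struct send_wr *wr)
{
	return (struct my_ud_wr *)((char *)wr - offsetof(struct my_ud_wr, wr));
}

int main(void)
{
	struct my_ud_wr posted = { .wr = { .opcode = 1 }, .remote_qpn = 42 };
	struct send_wr *base = &posted.wr;  /* what the core hands down */
	struct my_ud_wr copy;

	/* copy the whole variant, as the new memcpy() calls do */
	memcpy(&copy, to_ud_wr(base), sizeof(copy));
	printf("opcode=%d remote_qpn=%d\n", copy.wr.opcode, copy.remote_qpn);
	return 0;
}

The payoff is that the send queue entry can reserve space for the largest
variant while ordinary sends still copy only the small base struct.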
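Note on the hfi1_ib_rcv() hunks: qp->r_lock moves from plain spin_lock() to
the irqsave variants because a lock that can also be taken from interrupt
context must be held with local interrupts disabled; otherwise an interrupt
arriving on the same CPU could spin on the already-held lock forever. A
hedged, module-style sketch (not driver code) of the idiom:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(r_lock);

static void rcv_one_packet(void *packet)
{
	unsigned long flags;

	/* saves the current interrupt state in 'flags', then disables IRQs */
	spin_lock_irqsave(&r_lock, flags);
	/* ... dispatch via an opcode handler table, as hfi1_ib_rcv() does ... */
	spin_unlock_irqrestore(&r_lock, flags);
}

The new "unsigned long flags" local in hfi1_ib_rcv() exists solely to carry
that saved interrupt state between lock and unlock.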
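Note on the cache-naming hunks: kmem_cache names must be unique, and one
hfi1 verbs device is registered per unit, so the fixed "hfi1_vtxreq_cache"
name is replaced by a per-unit name formatted into a TXREQ_NAME_LEN buffer.
A hedged sketch (not driver code, with a hypothetical helper) of the same
pattern:

#include <linux/kernel.h>
#include <linux/slab.h>

#define TXREQ_NAME_LEN 24

static struct kmem_cache *create_txreq_cache(unsigned int unit,
					     size_t size,
					     void (*ctor)(void *))
{
	char buf[TXREQ_NAME_LEN];

	/* snprintf() guarantees NUL termination even for a huge 'unit' */
	snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", unit);
	return kmem_cache_create(buf, size, 0, SLAB_HWCACHE_ALIGN, ctor);
}

A stack buffer is safe here because the slab core duplicates the name
string it is given rather than keeping the caller's pointer.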