/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_hfi1_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02:    .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};

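/*
 * Illustrative sketch (helper name is hypothetical, not part of the
 * driver flow): a 5-bit AETH RNR timeout code simply indexes the table
 * above. ruc_loopback() below does the same lookup directly with
 * qp->r_min_rnr_timer.
 */
static inline u32 rnr_code_to_usec(u8 timeout_code)
{
	/* only the low 5 bits of the AETH code select an entry */
	return ib_hfi1_rnr_table[timeout_code & 0x1f];
}
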
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}
	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

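/*
 * Illustrative caller sketch (hypothetical helper, not from this file):
 * consuming the -1/0/1 return convention documented above. The error
 * path mirrors how the responder code in this file reacts to a local
 * error.
 */
static inline int example_consume_rwqe(struct rvt_qp *qp)
{
	int ret = hfi1_rvt_get_rwqe(qp, 0);

	if (ret < 0)
		/* local error: move the responder QP to the error state */
		hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	/* ret == 0: no RWQE posted (typically answered with an RNR NAK) */
	/* ret == 1: qp->r_wr_id and qp->r_sge now describe the RWQE */
	return ret;
}
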
static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
{
	if (!index) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		return cpu_to_be64(ppd->guid);
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
		       int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;
	u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
				    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
				    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

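/*
 * Illustrative sketch (hypothetical wrapper): the RC/UC receive paths
 * call hfi1_ruc_check_hdr() with qp->r_lock held, as required above,
 * and silently drop the packet when it returns nonzero.
 */
static inline bool example_hdr_ok(struct hfi1_ibport *ibp,
				  struct hfi1_ib_header *hdr,
				  int has_grh, struct rvt_qp *qp, u32 bth0)
{
	/* caller must hold qp->r_lock */
	return hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0) == 0;
}
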
/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;
	int copy_last = 0;
	u32 to;
	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;
	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;
	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
	hfi1_add_rnr_timer(sqp, to);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	hfi1_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

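/*
 * Illustrative sketch (hypothetical helper): the RVT_S_BUSY
 * claim-under-s_lock pattern used by ruc_loopback() above to serialize
 * against post_send() without holding the lock across the data copy.
 */
static inline bool example_claim_send_engine(struct rvt_qp *qp)
{
	/* caller holds qp->s_lock */
	if (qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT))
		return false;
	qp->s_flags |= RVT_S_BUSY;
	return true;
}
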
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
		ibp->guids[grh->sgid_index - 1] :
		cpu_to_be64(ppd_from_ibp(ibp)->guid);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

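/*
 * Worked example (illustrative; assumes SIZE_OF_CRC is one 32-bit
 * word): with hwords = 12 header words (which include the 2-word LRH
 * but not the GRH itself) and nwords = 4 payload words, paylen above is
 * (12 - 2 + 4 + 1) << 2 = 60 bytes, i.e. everything that follows the
 * GRH, per C8-7.
 */
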
#define BTH2_OFFSET (offsetof(struct hfi1_pio_header, hdr.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_hdr
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_hdr->sde = priv->s_sde;
			priv->s_hdr->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_hdr->ahgidx = qp->s_ahgidx;
			priv->s_hdr->ahgcount++;
			priv->s_hdr->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				priv->s_hdr->ahgcount++;
				priv->s_hdr->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}

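/*
 * Illustrative sketch (hypothetical helper): build_ahg() edits BTH2 in
 * 16-bit halves, so a second descriptor is only needed when the PSN
 * increment carries into the upper half, e.g. 0x0000ffff -> 0x00010000.
 */
static inline bool psn_needs_high_edit(u32 npsn, u32 ahgpsn)
{
	return (npsn & 0xffff0000) != (ahgpsn & 0xffff0000);
}
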
void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_qp_priv *priv = qp->priv;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth1;
	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = HFI1_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += hfi1_make_grh(ibp, &priv->s_hdr->ibh.u.l.grh,
						&qp->remote_ah_attr.grh,
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	/*
	 * reset s_hdr/AHG fields
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * an invalid ahgidx.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_hdr->tx_flags = 0;
	priv->s_hdr->ahgcount = 0;
	priv->s_hdr->ahgidx = 0;
	priv->s_hdr->sde = NULL;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;
	priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	priv->s_hdr->ibh.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	bth1 = qp->remote_qpn;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
	}
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

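/*
 * Worked example (illustrative): the pad computation above rounds the
 * payload up to a 4-byte boundary. For qp->s_cur_size == 5,
 * extra_bytes = -5 & 3 = 3 and nwords = (5 + 3) >> 2 = 2; the pad count
 * lands in BTH0 bits 20-21 via the bth0 |= extra_bytes << 20 above.
 */
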
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp);
	unsigned long flags;
	unsigned long timeout;
	unsigned long timeout_int;
	int cpu;

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc
								) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		timeout_int = (qp->timeout_jiffies);
		break;
	case IB_QPT_UC:
		if (!loopback && ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc
								) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
	}
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	timeout = jiffies + (timeout_int) / 8;
	cpu = priv->s_sde ? priv->s_sde->cpu :
		cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (unlikely(time_after(jiffies, timeout))) {
				if (workqueue_congested(cpu,
							ps.ppd->hfi1_wq)) {
					spin_lock_irqsave(&qp->s_lock, flags);
					qp->s_flags &= ~RVT_S_BUSY;
					hfi1_schedule_send(qp);
					spin_unlock_irqrestore(&qp->s_lock,
							       flags);
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
					return;
				}
				cond_resched();
				this_cpu_inc(*ps.ppd->dd->send_schedule);
				timeout = jiffies + (timeout_int) / 8;
			}
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}

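/*
 * Illustrative stub (hypothetical, not a real driver callback): the
 * make_req functions selected in hfi1_do_send() build at most one
 * packet per call and return nonzero while more work remains, which is
 * what drives the do/while loop above.
 */
static int example_make_req(struct rvt_qp *qp)
{
	/* returning 0 means nothing left to send; the loop exits */
	return 0;
}
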
/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}

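/*
 * Illustrative sketch (hypothetical helper): the signaling rule applied
 * above - a send CQE is queued when the QP does not restrict
 * completions to signaled WRs, when the WR itself requested a
 * completion, or unconditionally on error.
 */
static inline bool example_wc_needed(struct rvt_qp *qp,
				     struct rvt_swqe *wqe,
				     enum ib_wc_status status)
{
	return !(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	       (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	       status != IB_WC_SUCCESS;
}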