2 * Copyright(c) 2015, 2016 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #include <rdma/rdma_vt.h>
50 #include <rdma/rdmavt_qp.h>
54 #include "verbs_txreq.h"
57 /* cut down ridiculously long IB macro names */
58 #define OP(x) RC_OP(x)
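/*
 * For illustration: assuming RC_OP() is the usual IB_OPCODE_RC_##x
 * token paste (as in the sibling qib driver), OP(SEND_FIRST) expands
 * to IB_OPCODE_RC_SEND_FIRST and OP(RDMA_READ_REQUEST) to
 * IB_OPCODE_RC_RDMA_READ_REQUEST.
 */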
60 static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
65 len = delta_psn(psn, wqe->psn) * pmtu;
66 ss->sge = wqe->sg_list[0];
67 ss->sg_list = wqe->sg_list + 1;
68 ss->num_sge = wqe->wr.num_sge;
69 ss->total_len = wqe->length;
70 rvt_skip_sge(ss, len, false);
71 return wqe->length - len;
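/*
 * Worked example of the arithmetic above: restarting at a PSN that is
 * 3 packets past wqe->psn with a 4096-byte pMTU skips
 * 3 * 4096 = 12288 bytes of the SGE list and returns the number of
 * bytes still left to (re)send from that point.
 */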
75 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
76 * @dev: the device for this QP
77 * @qp: a pointer to the QP
78 * @ohdr: a pointer to the IB header being constructed
79 * @ps: the xmit packet state
81 * Return 1 if constructed; otherwise, return 0.
82 * Note that we are in the responder's side of the QP context.
83 * Note the QP s_lock must be held.
85 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
86 struct ib_other_headers *ohdr,
87 struct hfi1_pkt_state *ps)
89 struct rvt_ack_entry *e;
96 struct hfi1_qp_priv *priv = qp->priv;
98 lockdep_assert_held(&qp->s_lock);
99 /* Don't send an ACK if we aren't supposed to. */
100 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
103 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
106 switch (qp->s_ack_state) {
107 case OP(RDMA_READ_RESPONSE_LAST):
108 case OP(RDMA_READ_RESPONSE_ONLY):
109 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
110 if (e->rdma_sge.mr) {
111 rvt_put_mr(e->rdma_sge.mr);
112 e->rdma_sge.mr = NULL;
115 case OP(ATOMIC_ACKNOWLEDGE):
117 * We can increment the tail pointer now that the last
118 * response has been sent instead of only being constructed.
121 if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
122 qp->s_tail_ack_queue = 0;
125 case OP(ACKNOWLEDGE):
126 /* Check for no next entry in the queue. */
127 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
128 if (qp->s_flags & RVT_S_ACK_PENDING)
133 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
134 if (e->opcode == OP(RDMA_READ_REQUEST)) {
136 * If an RDMA read response is being resent and
137 * we haven't seen the duplicate request yet,
138 * then stop sending the remaining responses the
139 * responder has seen until the requester re-sends it.
141 len = e->rdma_sge.sge_length;
142 if (len && !e->rdma_sge.mr) {
143 qp->s_tail_ack_queue = qp->r_head_ack_queue;
146 /* Copy SGE state in case we need to resend */
147 ps->s_txreq->mr = e->rdma_sge.mr;
149 rvt_get_mr(ps->s_txreq->mr);
150 qp->s_ack_rdma_sge.sge = e->rdma_sge;
151 qp->s_ack_rdma_sge.num_sge = 1;
152 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
155 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
157 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
160 ohdr->u.aeth = rvt_compute_aeth(qp);
162 qp->s_ack_rdma_psn = e->psn;
163 bth2 = mask_psn(qp->s_ack_rdma_psn++);
165 /* COMPARE_SWAP or FETCH_ADD */
166 ps->s_txreq->ss = NULL;
168 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
169 ohdr->u.at.aeth = rvt_compute_aeth(qp);
170 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
171 hwords += sizeof(ohdr->u.at) / sizeof(u32);
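/*
 * Assuming u.at holds only the AETH plus the 64-bit atomic ack data
 * referenced above, this adds (4 + 8) / 4 = 3 header words.
 */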
172 bth2 = mask_psn(e->psn);
175 bth0 = qp->s_ack_state << 24;
178 case OP(RDMA_READ_RESPONSE_FIRST):
179 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
181 case OP(RDMA_READ_RESPONSE_MIDDLE):
182 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
183 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
185 rvt_get_mr(ps->s_txreq->mr);
186 len = qp->s_ack_rdma_sge.sge.sge_length;
189 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
191 ohdr->u.aeth = rvt_compute_aeth(qp);
193 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
194 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
197 bth0 = qp->s_ack_state << 24;
198 bth2 = mask_psn(qp->s_ack_rdma_psn++);
204 * Send a regular ACK.
205 * Set the s_ack_state so we wait until after sending
206 * the ACK before setting s_ack_state to ACKNOWLEDGE
209 qp->s_ack_state = OP(SEND_ONLY);
210 qp->s_flags &= ~RVT_S_ACK_PENDING;
211 ps->s_txreq->ss = NULL;
214 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
216 IB_AETH_CREDIT_SHIFT));
218 ohdr->u.aeth = rvt_compute_aeth(qp);
221 bth0 = OP(ACKNOWLEDGE) << 24;
222 bth2 = mask_psn(qp->s_ack_psn);
224 qp->s_rdma_ack_cnt++;
225 qp->s_hdrwords = hwords;
226 ps->s_txreq->sde = priv->s_sde;
227 ps->s_txreq->s_cur_size = len;
228 hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
230 ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
234 qp->s_ack_state = OP(ACKNOWLEDGE);
236 * Ensure s_rdma_ack_cnt changes are committed prior to resetting RVT_S_RESP_PENDING.
240 qp->s_flags &= ~(RVT_S_RESP_PENDING
247 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
248 * @qp: a pointer to the QP
250 * Assumes s_lock is held.
252 * Return 1 if constructed; otherwise, return 0.
254 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
256 struct hfi1_qp_priv *priv = qp->priv;
257 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
258 struct ib_other_headers *ohdr;
259 struct rvt_sge_state *ss;
260 struct rvt_swqe *wqe;
261 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
271 lockdep_assert_held(&qp->s_lock);
272 ps->s_txreq = get_txreq(ps->dev, qp);
273 if (IS_ERR(ps->s_txreq))
276 ohdr = &ps->s_txreq->phdr.hdr.u.oth;
277 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
278 ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
280 /* Sending responses has higher priority over sending requests. */
281 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
282 make_rc_ack(dev, qp, ohdr, ps))
285 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
286 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
288 /* We are in the error state, flush the work request. */
289 smp_read_barrier_depends(); /* see post_one_send() */
290 if (qp->s_last == READ_ONCE(qp->s_head))
292 /* If DMAs are in progress, we can't flush immediately. */
293 if (iowait_sdma_pending(&priv->s_iowait)) {
294 qp->s_flags |= RVT_S_WAIT_DMA;
298 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
299 hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
300 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
301 /* will get called again */
305 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
308 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
309 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
310 qp->s_flags |= RVT_S_WAIT_PSN;
313 qp->s_sending_psn = qp->s_psn;
314 qp->s_sending_hpsn = qp->s_psn - 1;
317 /* Send a request. */
318 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
319 switch (qp->s_state) {
321 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
324 * Resend an old request or start a new one.
326 * We keep track of the current SWQE so that
327 * we don't reset the "furthest progress" state
328 * if we need to back up.
331 if (qp->s_cur == qp->s_tail) {
332 /* Check if send work queue is empty. */
333 smp_read_barrier_depends(); /* see post_one_send() */
334 if (qp->s_tail == READ_ONCE(qp->s_head)) {
339 * If a fence is requested, wait for previous
340 * RDMA read and atomic operations to finish.
342 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
343 qp->s_num_rd_atomic) {
344 qp->s_flags |= RVT_S_WAIT_FENCE;
348 * Local operations are processed immediately
349 * after all prior requests have completed
351 if (wqe->wr.opcode == IB_WR_REG_MR ||
352 wqe->wr.opcode == IB_WR_LOCAL_INV) {
356 if (qp->s_last != qp->s_cur)
358 if (++qp->s_cur == qp->s_size)
360 if (++qp->s_tail == qp->s_size)
362 if (!(wqe->wr.send_flags &
363 RVT_SEND_COMPLETION_ONLY)) {
364 err = rvt_invalidate_rkey(
366 wqe->wr.ex.invalidate_rkey);
369 hfi1_send_complete(qp, wqe,
370 err ? IB_WC_LOC_PROT_ERR
373 atomic_dec(&qp->local_ops_pending);
379 qp->s_psn = wqe->psn;
382 * Note that we have to be careful not to modify the
383 * original work request since we may need to resend it.
388 bth2 = mask_psn(qp->s_psn);
389 switch (wqe->wr.opcode) {
391 case IB_WR_SEND_WITH_IMM:
392 case IB_WR_SEND_WITH_INV:
393 /* If no credit, return. */
394 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
395 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
396 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
400 qp->s_state = OP(SEND_FIRST);
404 if (wqe->wr.opcode == IB_WR_SEND) {
405 qp->s_state = OP(SEND_ONLY);
406 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
407 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
408 /* Immediate data comes after the BTH */
409 ohdr->u.imm_data = wqe->wr.ex.imm_data;
412 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
413 /* Invalidate rkey comes after the BTH */
414 ohdr->u.ieth = cpu_to_be32(
415 wqe->wr.ex.invalidate_rkey);
418 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
419 bth0 |= IB_BTH_SOLICITED;
420 bth2 |= IB_BTH_REQ_ACK;
421 if (++qp->s_cur == qp->s_size)
425 case IB_WR_RDMA_WRITE:
426 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
429 case IB_WR_RDMA_WRITE_WITH_IMM:
430 /* If no credit, return. */
431 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
432 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
433 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
437 wqe->rdma_wr.remote_addr,
439 ohdr->u.rc.reth.rkey =
440 cpu_to_be32(wqe->rdma_wr.rkey);
441 ohdr->u.rc.reth.length = cpu_to_be32(len);
442 hwords += sizeof(struct ib_reth) / sizeof(u32);
444 qp->s_state = OP(RDMA_WRITE_FIRST);
448 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
449 qp->s_state = OP(RDMA_WRITE_ONLY);
452 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
453 /* Immediate data comes after RETH */
454 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
456 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
457 bth0 |= IB_BTH_SOLICITED;
459 bth2 |= IB_BTH_REQ_ACK;
460 if (++qp->s_cur == qp->s_size)
464 case IB_WR_RDMA_READ:
466 * Don't allow more operations to be started
467 * than the QP limits allow.
470 if (qp->s_num_rd_atomic >=
471 qp->s_max_rd_atomic) {
472 qp->s_flags |= RVT_S_WAIT_RDMAR;
475 qp->s_num_rd_atomic++;
476 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
480 wqe->rdma_wr.remote_addr,
482 ohdr->u.rc.reth.rkey =
483 cpu_to_be32(wqe->rdma_wr.rkey);
484 ohdr->u.rc.reth.length = cpu_to_be32(len);
485 qp->s_state = OP(RDMA_READ_REQUEST);
486 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
489 bth2 |= IB_BTH_REQ_ACK;
490 if (++qp->s_cur == qp->s_size)
494 case IB_WR_ATOMIC_CMP_AND_SWP:
495 case IB_WR_ATOMIC_FETCH_AND_ADD:
497 * Don't allow more operations to be started
498 * than the QP limits allow.
501 if (qp->s_num_rd_atomic >=
502 qp->s_max_rd_atomic) {
503 qp->s_flags |= RVT_S_WAIT_RDMAR;
506 qp->s_num_rd_atomic++;
507 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
510 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
511 qp->s_state = OP(COMPARE_SWAP);
512 put_ib_ateth_swap(wqe->atomic_wr.swap,
513 &ohdr->u.atomic_eth);
514 put_ib_ateth_compare(wqe->atomic_wr.compare_add,
515 &ohdr->u.atomic_eth);
517 qp->s_state = OP(FETCH_ADD);
518 put_ib_ateth_swap(wqe->atomic_wr.compare_add,
519 &ohdr->u.atomic_eth);
520 put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
522 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
523 &ohdr->u.atomic_eth);
524 ohdr->u.atomic_eth.rkey = cpu_to_be32(
525 wqe->atomic_wr.rkey);
526 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
529 bth2 |= IB_BTH_REQ_ACK;
530 if (++qp->s_cur == qp->s_size)
537 qp->s_sge.sge = wqe->sg_list[0];
538 qp->s_sge.sg_list = wqe->sg_list + 1;
539 qp->s_sge.num_sge = wqe->wr.num_sge;
540 qp->s_sge.total_len = wqe->length;
541 qp->s_len = wqe->length;
544 if (qp->s_tail >= qp->s_size)
547 if (wqe->wr.opcode == IB_WR_RDMA_READ)
548 qp->s_psn = wqe->lpsn + 1;
553 case OP(RDMA_READ_RESPONSE_FIRST):
555 * qp->s_state is normally set to the opcode of the
556 * last packet constructed for new requests and therefore
557 * is never set to RDMA read response.
558 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
559 * thread to indicate a SEND needs to be restarted from an
560 * earlier PSN without interfering with the sending thread.
563 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
566 qp->s_state = OP(SEND_MIDDLE);
568 case OP(SEND_MIDDLE):
569 bth2 = mask_psn(qp->s_psn++);
574 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
577 if (wqe->wr.opcode == IB_WR_SEND) {
578 qp->s_state = OP(SEND_LAST);
579 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
580 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
581 /* Immediate data comes after the BTH */
582 ohdr->u.imm_data = wqe->wr.ex.imm_data;
585 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
586 /* invalidate data comes after the BTH */
587 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
590 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
591 bth0 |= IB_BTH_SOLICITED;
592 bth2 |= IB_BTH_REQ_ACK;
594 if (qp->s_cur >= qp->s_size)
598 case OP(RDMA_READ_RESPONSE_LAST):
600 * qp->s_state is normally set to the opcode of the
601 * last packet constructed for new requests and therefore
602 * is never set to RDMA read response.
603 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
604 * thread to indicate an RDMA write needs to be restarted from
605 * an earlier PSN without interfering with the sending thread.
608 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
610 case OP(RDMA_WRITE_FIRST):
611 qp->s_state = OP(RDMA_WRITE_MIDDLE);
613 case OP(RDMA_WRITE_MIDDLE):
614 bth2 = mask_psn(qp->s_psn++);
619 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
622 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
623 qp->s_state = OP(RDMA_WRITE_LAST);
625 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
626 /* Immediate data comes after the BTH */
627 ohdr->u.imm_data = wqe->wr.ex.imm_data;
629 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
630 bth0 |= IB_BTH_SOLICITED;
632 bth2 |= IB_BTH_REQ_ACK;
634 if (qp->s_cur >= qp->s_size)
638 case OP(RDMA_READ_RESPONSE_MIDDLE):
640 * qp->s_state is normally set to the opcode of the
641 * last packet constructed for new requests and therefore
642 * is never set to RDMA read response.
643 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
644 * thread to indicate an RDMA read needs to be restarted from
645 * an earlier PSN without interfering with the sending thread.
648 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
650 wqe->rdma_wr.remote_addr + len,
652 ohdr->u.rc.reth.rkey =
653 cpu_to_be32(wqe->rdma_wr.rkey);
654 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
655 qp->s_state = OP(RDMA_READ_REQUEST);
656 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
657 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
658 qp->s_psn = wqe->lpsn + 1;
662 if (qp->s_cur == qp->s_size)
666 qp->s_sending_hpsn = bth2;
667 delta = delta_psn(bth2, wqe->psn);
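/*
 * The test below appears to request an ACK periodically within a long
 * message: e.g. if HFI1_PSN_CREDIT were 16, every 16th packet of the
 * request would carry IB_BTH_REQ_ACK so the requester gets
 * intermediate acknowledgements and credit updates.
 */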
668 if (delta && delta % HFI1_PSN_CREDIT == 0)
669 bth2 |= IB_BTH_REQ_ACK;
670 if (qp->s_flags & RVT_S_SEND_ONE) {
671 qp->s_flags &= ~RVT_S_SEND_ONE;
672 qp->s_flags |= RVT_S_WAIT_ACK;
673 bth2 |= IB_BTH_REQ_ACK;
676 qp->s_hdrwords = hwords;
677 ps->s_txreq->sde = priv->s_sde;
678 ps->s_txreq->ss = ss;
679 ps->s_txreq->s_cur_size = len;
680 hfi1_make_ruc_header(
683 bth0 | (qp->s_state << 24),
688 ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
692 hfi1_put_txreq(ps->s_txreq);
697 hfi1_put_txreq(ps->s_txreq);
701 qp->s_flags &= ~RVT_S_BUSY;
707 * hfi1_send_rc_ack - Construct an ACK packet and send it
708 * @qp: a pointer to the QP
710 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
711 * Note that RDMA reads and atomics are handled in the
712 * send side QP state and send engine.
714 void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
717 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
718 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
719 u64 pbc, pbc_flags = 0;
725 struct send_context *sc;
726 struct pio_buf *pbuf;
727 struct ib_header hdr;
728 struct ib_other_headers *ohdr;
730 struct hfi1_qp_priv *priv = qp->priv;
732 /* clear the defer count */
733 priv->r_adefered = 0;
735 /* Don't send an ACK or NAK if an RDMA read or atomic is pending. */
736 if (qp->s_flags & RVT_S_RESP_PENDING)
739 /* Ensure s_rdma_ack_cnt changes are committed */
740 smp_read_barrier_depends();
741 if (qp->s_rdma_ack_cnt)
744 /* Construct the header */
745 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
747 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
748 hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
749 &qp->remote_ah_attr.grh, hwords, 0);
756 /* read pkey_index w/o lock (it's atomic) */
757 bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
758 if (qp->s_mig_state == IB_MIG_MIGRATED)
759 bth0 |= IB_BTH_MIG_REQ;
761 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
763 IB_AETH_CREDIT_SHIFT));
765 ohdr->u.aeth = rvt_compute_aeth(qp);
766 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
767 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
768 pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
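/*
 * Example: sc5 = 0x03 leaves SC[4] clear so nothing is added, while
 * sc5 = 0x13 has SC[4] set and shifts a 1 into the PBC_DC_INFO
 * position of pbc_flags.
 */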
769 lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
770 hdr.lrh[0] = cpu_to_be16(lrh0);
771 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
772 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
773 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
774 ohdr->bth[0] = cpu_to_be32(bth0);
775 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
776 ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
777 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
779 /* Don't try to send ACKs if the link isn't ACTIVE */
780 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
784 plen = 2 /* PBC */ + hwords;
785 vl = sc_to_vlt(ppd->dd, sc5);
786 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
788 pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
791 * We have no room to send at the moment. Pass
792 * responsibility for sending the ACK to the send engine
793 * so that when enough buffer space becomes available,
794 * the ACK is sent ahead of other outgoing packets.
799 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
801 /* write the pbc and data */
802 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);
807 spin_lock_irqsave(&qp->s_lock, flags);
808 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
810 this_cpu_inc(*ibp->rvp.rc_qacks);
811 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
812 qp->s_nak_state = qp->r_nak_state;
813 qp->s_ack_psn = qp->r_ack_psn;
815 qp->s_flags |= RVT_S_ECN;
817 /* Schedule the send engine. */
818 hfi1_schedule_send(qp);
820 spin_unlock_irqrestore(&qp->s_lock, flags);
824 * reset_psn - reset the QP state to send starting from PSN
826 * @psn: the packet sequence number to restart at
828 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
830 * Called at interrupt level with the QP s_lock held.
832 static void reset_psn(struct rvt_qp *qp, u32 psn)
835 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
838 lockdep_assert_held(&qp->s_lock);
842 * If we are starting the request from the beginning,
843 * let the normal send code handle initialization.
845 if (cmp_psn(psn, wqe->psn) <= 0) {
846 qp->s_state = OP(SEND_LAST);
850 /* Find the work request opcode corresponding to the given PSN. */
851 opcode = wqe->wr.opcode;
855 if (++n == qp->s_size)
859 wqe = rvt_get_swqe_ptr(qp, n);
860 diff = cmp_psn(psn, wqe->psn);
865 * If we are starting the request from the beginning,
866 * let the normal send code handle initialization.
869 qp->s_state = OP(SEND_LAST);
872 opcode = wqe->wr.opcode;
876 * Set the state to restart in the middle of a request.
877 * Don't change the s_sge, s_cur_sge, or s_cur_size.
878 * See hfi1_make_rc_req().
882 case IB_WR_SEND_WITH_IMM:
883 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
886 case IB_WR_RDMA_WRITE:
887 case IB_WR_RDMA_WRITE_WITH_IMM:
888 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
891 case IB_WR_RDMA_READ:
892 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
897 * This case shouldn't happen since it's only
900 qp->s_state = OP(SEND_LAST);
905 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
906 * asynchronously before the send engine can get scheduled.
907 * Doing it in hfi1_make_rc_req() is too late.
909 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
910 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
911 qp->s_flags |= RVT_S_WAIT_PSN;
912 qp->s_flags &= ~RVT_S_AHG_VALID;
916 * Back up requester to resend the last un-ACKed request.
917 * The QP r_lock and s_lock should be held and interrupts disabled.
919 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
921 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
922 struct hfi1_ibport *ibp;
924 lockdep_assert_held(&qp->r_lock);
925 lockdep_assert_held(&qp->s_lock);
926 if (qp->s_retry == 0) {
927 if (qp->s_mig_state == IB_MIG_ARMED) {
929 qp->s_retry = qp->s_retry_cnt;
930 } else if (qp->s_last == qp->s_acked) {
931 hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
932 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
934 } else { /* need to handle delayed completion */
941 ibp = to_iport(qp->ibqp.device, qp->port_num);
942 if (wqe->wr.opcode == IB_WR_RDMA_READ)
943 ibp->rvp.n_rc_resends++;
945 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
947 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
948 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
951 qp->s_flags |= RVT_S_SEND_ONE;
956 * Set qp->s_sending_psn to the next PSN after the given one.
957 * This would be psn+1 except when RDMA reads are present.
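 * For example: if the WQE covering the given PSN is an RDMA read with
 * lpsn well past psn, the code below sets s_sending_psn to
 * wqe->lpsn + 1, skipping the read-response PSNs that the responder
 * (not this side) will generate.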
959 static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
961 struct rvt_swqe *wqe;
964 lockdep_assert_held(&qp->s_lock);
965 /* Find the work request corresponding to the given PSN. */
967 wqe = rvt_get_swqe_ptr(qp, n);
968 if (cmp_psn(psn, wqe->lpsn) <= 0) {
969 if (wqe->wr.opcode == IB_WR_RDMA_READ)
970 qp->s_sending_psn = wqe->lpsn + 1;
972 qp->s_sending_psn = psn + 1;
975 if (++n == qp->s_size)
983 * This should be called with the QP s_lock held and interrupts disabled.
985 void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
987 struct ib_other_headers *ohdr;
988 struct rvt_swqe *wqe;
992 lockdep_assert_held(&qp->s_lock);
993 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
996 /* Find out where the BTH is */
997 if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
1000 ohdr = &hdr->u.l.oth;
1002 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1003 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1004 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1005 WARN_ON(!qp->s_rdma_ack_cnt);
1006 qp->s_rdma_ack_cnt--;
1010 psn = be32_to_cpu(ohdr->bth[2]);
1011 reset_sending_psn(qp, psn);
1014 * Start timer after a packet requesting an ACK has been sent and
1015 * there are still requests that haven't been acked.
1017 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1019 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
1020 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1021 rvt_add_retry_timer(qp);
1023 while (qp->s_last != qp->s_acked) {
1026 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1027 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1028 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1030 s_last = qp->s_last;
1031 trace_hfi1_qp_send_completion(qp, wqe, s_last);
1032 if (++s_last >= qp->s_size)
1034 qp->s_last = s_last;
1035 /* see post_send() */
1038 rvt_qp_swqe_complete(qp,
1040 ib_hfi1_wc_opcode[wqe->wr.opcode],
1044 * If we were waiting for sends to complete before re-sending,
1045 * and they are now complete, restart sending.
1047 trace_hfi1_sendcomplete(qp, psn);
1048 if (qp->s_flags & RVT_S_WAIT_PSN &&
1049 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1050 qp->s_flags &= ~RVT_S_WAIT_PSN;
1051 qp->s_sending_psn = qp->s_psn;
1052 qp->s_sending_hpsn = qp->s_psn - 1;
1053 hfi1_schedule_send(qp);
1057 static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1059 qp->s_last_psn = psn;
1063 * Generate a SWQE completion.
1064 * This is similar to hfi1_send_complete but has to check to be sure
1065 * that the SGEs are not being referenced if the SWQE is being resent.
1067 static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1068 struct rvt_swqe *wqe,
1069 struct hfi1_ibport *ibp)
1071 lockdep_assert_held(&qp->s_lock);
1073 * Don't decrement refcount and don't generate a
1074 * completion if the SWQE is being resent until the send engine completes sending the SWQE.
1077 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1078 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1082 s_last = qp->s_last;
1083 trace_hfi1_qp_send_completion(qp, wqe, s_last);
1084 if (++s_last >= qp->s_size)
1086 qp->s_last = s_last;
1087 /* see post_send() */
1089 rvt_qp_swqe_complete(qp,
1091 ib_hfi1_wc_opcode[wqe->wr.opcode],
1094 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1096 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
1098 * If send progress is not running, attempt to progress the SDMA engine.
1101 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1102 struct sdma_engine *engine;
1105 /* For now use sc to find engine */
1106 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
1107 engine = qp_to_sdma_engine(qp, sc5);
1108 sdma_engine_progress_schedule(engine);
1112 qp->s_retry = qp->s_retry_cnt;
1113 update_last_psn(qp, wqe->lpsn);
1116 * If we are completing a request which is in the process of
1117 * being resent, we can stop re-sending it since we know the
1118 * responder has already seen it.
1120 if (qp->s_acked == qp->s_cur) {
1121 if (++qp->s_cur >= qp->s_size)
1123 qp->s_acked = qp->s_cur;
1124 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1125 if (qp->s_acked != qp->s_tail) {
1126 qp->s_state = OP(SEND_LAST);
1127 qp->s_psn = wqe->psn;
1130 if (++qp->s_acked >= qp->s_size)
1132 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1134 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1140 * do_rc_ack - process an incoming RC ACK
1141 * @qp: the QP the ACK came in on
1142 * @psn: the packet sequence number of the ACK
1143 * @opcode: the opcode of the request that resulted in the ACK
1145 * This is called from rc_rcv_resp() to process an incoming RC ACK
1147 * May be called at interrupt level, with the QP s_lock held.
1148 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1150 static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
1151 u64 val, struct hfi1_ctxtdata *rcd)
1153 struct hfi1_ibport *ibp;
1154 enum ib_wc_status status;
1155 struct rvt_swqe *wqe;
1160 lockdep_assert_held(&qp->s_lock);
1162 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1163 * requests and implicitly NAK RDMA read and atomic requests issued
1164 * before the NAK'ed request. The MSN won't include the NAK'ed
1165 * request but will include any ACK'ed requests.
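 * Worked example: with a SEND at PSN 5, an RDMA read at PSN 6 and a
 * SEND at PSN 7 outstanding, a NAK for PSN 7 implicitly ACKs the SEND
 * at PSN 5 but means the read at PSN 6 must be re-issued.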
1168 if (aeth >> IB_AETH_NAK_SHIFT)
1170 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1171 ibp = rcd_to_iport(rcd);
1174 * The MSN might be for a later WQE than the PSN indicates so
1175 * only complete WQEs that the PSN finishes.
1177 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
1179 * RDMA_READ_RESPONSE_ONLY is a special case since
1180 * we want to generate completion events for everything
1181 * before the RDMA read, copy the data, then generate
1182 * the completion for the read.
1184 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1185 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1191 * If this request is an RDMA read or atomic, and the ACK is
1192 * for a later operation, this ACK NAKs the RDMA read or
1193 * atomic. In other words, only an RDMA_READ_LAST or ONLY
1194 * can ACK an RDMA read and likewise for atomic ops. Note
1195 * that the NAK case can only happen if relaxed ordering is
1196 * used and requests are sent after an RDMA read or atomic
1197 * is sent but before the response is received.
1199 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1200 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1201 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1202 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1203 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1204 /* Retry this request. */
1205 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1206 qp->r_flags |= RVT_R_RDMAR_SEQ;
1207 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1208 if (list_empty(&qp->rspwait)) {
1209 qp->r_flags |= RVT_R_RSP_SEND;
1211 list_add_tail(&qp->rspwait,
1212 &rcd->qp_wait_list);
1216 * No need to process the ACK/NAK since we are
1217 * restarting an earlier request.
1221 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1222 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1223 u64 *vaddr = wqe->sg_list[0].vaddr;
1226 if (qp->s_num_rd_atomic &&
1227 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1228 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1229 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1230 qp->s_num_rd_atomic--;
1231 /* Restart sending task if fence is complete */
1232 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
1233 !qp->s_num_rd_atomic) {
1234 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
1236 hfi1_schedule_send(qp);
1237 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
1238 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
1240 hfi1_schedule_send(qp);
1243 wqe = do_rc_completion(qp, wqe, ibp);
1244 if (qp->s_acked == qp->s_tail)
1248 switch (aeth >> IB_AETH_NAK_SHIFT) {
1250 this_cpu_inc(*ibp->rvp.rc_acks);
1251 if (qp->s_acked != qp->s_tail) {
1253 * We are expecting more ACKs so
1254 * mod the retry timer.
1256 rvt_mod_retry_timer(qp);
1258 * We can stop re-sending the earlier packets and
1259 * continue with the next packet the receiver wants.
1261 if (cmp_psn(qp->s_psn, psn) <= 0)
1262 reset_psn(qp, psn + 1);
1264 /* No more acks - kill all timers */
1265 rvt_stop_rc_timers(qp);
1266 if (cmp_psn(qp->s_psn, psn) <= 0) {
1267 qp->s_state = OP(SEND_LAST);
1268 qp->s_psn = psn + 1;
1271 if (qp->s_flags & RVT_S_WAIT_ACK) {
1272 qp->s_flags &= ~RVT_S_WAIT_ACK;
1273 hfi1_schedule_send(qp);
1275 rvt_get_credit(qp, aeth);
1276 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1277 qp->s_retry = qp->s_retry_cnt;
1278 update_last_psn(qp, psn);
1281 case 1: /* RNR NAK */
1282 ibp->rvp.n_rnr_naks++;
1283 if (qp->s_acked == qp->s_tail)
1285 if (qp->s_flags & RVT_S_WAIT_RNR)
1287 if (qp->s_rnr_retry == 0) {
1288 status = IB_WC_RNR_RETRY_EXC_ERR;
1291 if (qp->s_rnr_retry_cnt < 7)
1294 /* The last valid PSN is the previous PSN. */
1295 update_last_psn(qp, psn - 1);
1297 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1301 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
1302 rvt_stop_rc_timers(qp);
1303 rvt_add_rnr_timer(qp, aeth);
1307 if (qp->s_acked == qp->s_tail)
1309 /* The last valid PSN is the previous PSN. */
1310 update_last_psn(qp, psn - 1);
1311 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
1312 IB_AETH_CREDIT_MASK) {
1313 case 0: /* PSN sequence error */
1314 ibp->rvp.n_seq_naks++;
1316 * Back up to the responder's expected PSN.
1317 * Note that we might get a NAK in the middle of an
1318 * RDMA READ response which terminates the RDMA READ.
1321 hfi1_restart_rc(qp, psn, 0);
1322 hfi1_schedule_send(qp);
1325 case 1: /* Invalid Request */
1326 status = IB_WC_REM_INV_REQ_ERR;
1327 ibp->rvp.n_other_naks++;
1330 case 2: /* Remote Access Error */
1331 status = IB_WC_REM_ACCESS_ERR;
1332 ibp->rvp.n_other_naks++;
1335 case 3: /* Remote Operation Error */
1336 status = IB_WC_REM_OP_ERR;
1337 ibp->rvp.n_other_naks++;
1339 if (qp->s_last == qp->s_acked) {
1340 hfi1_send_complete(qp, wqe, status);
1341 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1346 /* Ignore other reserved NAK error codes */
1349 qp->s_retry = qp->s_retry_cnt;
1350 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1353 default: /* 2: reserved */
1355 /* Ignore reserved NAK codes. */
1358 /* cannot be reached */
1360 rvt_stop_rc_timers(qp);
1365 * We have seen an out of sequence RDMA read middle or last packet.
1366 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1368 static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
1369 struct hfi1_ctxtdata *rcd)
1371 struct rvt_swqe *wqe;
1373 lockdep_assert_held(&qp->s_lock);
1374 /* Remove QP from retry timer */
1375 rvt_stop_rc_timers(qp);
1377 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1379 while (cmp_psn(psn, wqe->lpsn) > 0) {
1380 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1381 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1382 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1384 wqe = do_rc_completion(qp, wqe, ibp);
1387 ibp->rvp.n_rdma_seq++;
1388 qp->r_flags |= RVT_R_RDMAR_SEQ;
1389 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1390 if (list_empty(&qp->rspwait)) {
1391 qp->r_flags |= RVT_R_RSP_SEND;
1393 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1398 * rc_rcv_resp - process an incoming RC response packet
1399 * @ibp: the port this packet came in on
1400 * @ohdr: the other headers for this packet
1401 * @data: the packet data
1402 * @tlen: the packet length
1403 * @qp: the QP for this packet
1404 * @opcode: the opcode for this packet
1405 * @psn: the packet sequence number for this packet
1406 * @hdrsize: the header length
1407 * @pmtu: the path MTU
1409 * This is called from hfi1_rc_rcv() to process an incoming RC response
1410 * packet for the given QP.
1411 * Called at interrupt level.
1413 static void rc_rcv_resp(struct hfi1_ibport *ibp,
1414 struct ib_other_headers *ohdr,
1415 void *data, u32 tlen, struct rvt_qp *qp,
1416 u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
1417 struct hfi1_ctxtdata *rcd)
1419 struct rvt_swqe *wqe;
1420 enum ib_wc_status status;
1421 unsigned long flags;
1427 spin_lock_irqsave(&qp->s_lock, flags);
1429 trace_hfi1_ack(qp, psn);
1431 /* Ignore invalid responses. */
1432 smp_read_barrier_depends(); /* see post_one_send */
1433 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
1436 /* Ignore duplicate responses. */
1437 diff = cmp_psn(psn, qp->s_last_psn);
1438 if (unlikely(diff <= 0)) {
1439 /* Update credits for "ghost" ACKs */
1440 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1441 aeth = be32_to_cpu(ohdr->u.aeth);
1442 if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
1443 rvt_get_credit(qp, aeth);
1449 * Skip everything other than the PSN we expect, if we are waiting
1450 * for a reply to a restarted RDMA read or atomic op.
1452 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
1453 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
1455 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
1458 if (unlikely(qp->s_acked == qp->s_tail))
1460 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1461 status = IB_WC_SUCCESS;
1464 case OP(ACKNOWLEDGE):
1465 case OP(ATOMIC_ACKNOWLEDGE):
1466 case OP(RDMA_READ_RESPONSE_FIRST):
1467 aeth = be32_to_cpu(ohdr->u.aeth);
1468 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1469 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
1472 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1473 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1475 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1476 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1479 * If this is a response to a resent RDMA read, we
1480 * have to be careful to copy the data to the right location.
1483 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1487 case OP(RDMA_READ_RESPONSE_MIDDLE):
1488 /* no AETH, no ACK */
1489 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
1491 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1494 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1496 if (unlikely(pmtu >= qp->s_rdma_read_len))
1500 * We got a response so update the timeout.
1501 * 4.096 usec. * (1 << qp->timeout)
1503 rvt_mod_retry_timer(qp);
1504 if (qp->s_flags & RVT_S_WAIT_ACK) {
1505 qp->s_flags &= ~RVT_S_WAIT_ACK;
1506 hfi1_schedule_send(qp);
1509 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1510 qp->s_retry = qp->s_retry_cnt;
1513 * Update the RDMA receive state but do the copy w/o
1514 * holding the locks and blocking interrupts.
1516 qp->s_rdma_read_len -= pmtu;
1517 update_last_psn(qp, psn);
1518 spin_unlock_irqrestore(&qp->s_lock, flags);
1519 hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
1522 case OP(RDMA_READ_RESPONSE_ONLY):
1523 aeth = be32_to_cpu(ohdr->u.aeth);
1524 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1526 /* Get the number of bytes the message was padded by. */
1527 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1529 * Check that the data size is >= 0 && <= pmtu.
1530 * Remember to account for ICRC (4).
1532 if (unlikely(tlen < (hdrsize + pad + 4)))
1535 * If this is a response to a resent RDMA read, we
1536 * have to be careful to copy the data to the right location.
1539 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1540 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1544 case OP(RDMA_READ_RESPONSE_LAST):
1545 /* ACKs READ req. */
1546 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
1548 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1550 /* Get the number of bytes the message was padded by. */
1551 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1553 * Check that the data size is >= 1 && <= pmtu.
1554 * Remember to account for ICRC (4).
1556 if (unlikely(tlen <= (hdrsize + pad + 4)))
1559 tlen -= hdrsize + pad + 4;
1560 if (unlikely(tlen != qp->s_rdma_read_len))
1562 aeth = be32_to_cpu(ohdr->u.aeth);
1563 hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
1564 WARN_ON(qp->s_rdma_read_sge.num_sge);
1565 (void)do_rc_ack(qp, aeth, psn,
1566 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1571 status = IB_WC_LOC_QP_OP_ERR;
1575 rdma_seq_err(qp, ibp, psn, rcd);
1579 status = IB_WC_LOC_LEN_ERR;
1581 if (qp->s_last == qp->s_acked) {
1582 hfi1_send_complete(qp, wqe, status);
1583 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1586 spin_unlock_irqrestore(&qp->s_lock, flags);
1591 static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
1594 if (list_empty(&qp->rspwait)) {
1595 qp->r_flags |= RVT_R_RSP_NAK;
1597 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1601 static inline void rc_cancel_ack(struct rvt_qp *qp)
1603 struct hfi1_qp_priv *priv = qp->priv;
1605 priv->r_adefered = 0;
1606 if (list_empty(&qp->rspwait))
1608 list_del_init(&qp->rspwait);
1609 qp->r_flags &= ~RVT_R_RSP_NAK;
1614 * rc_rcv_error - process an incoming duplicate or error RC packet
1615 * @ohdr: the other headers for this packet
1616 * @data: the packet data
1617 * @qp: the QP for this packet
1618 * @opcode: the opcode for this packet
1619 * @psn: the packet sequence number for this packet
1620 * @diff: the difference between the PSN and the expected PSN
1622 * This is called from hfi1_rc_rcv() to process an unexpected
1623 * incoming RC packet for the given QP.
1624 * Called at interrupt level.
1625 * Return 1 if no more processing is needed; otherwise return 0 to
1626 * schedule a response to be sent.
1628 static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
1629 struct rvt_qp *qp, u32 opcode, u32 psn,
1630 int diff, struct hfi1_ctxtdata *rcd)
1632 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1633 struct rvt_ack_entry *e;
1634 unsigned long flags;
1638 trace_hfi1_rcv_error(qp, psn);
1641 * Packet sequence error.
1642 * A NAK will ACK earlier sends and RDMA writes.
1643 * Don't queue the NAK if we already sent one.
1645 if (!qp->r_nak_state) {
1646 ibp->rvp.n_rc_seqnak++;
1647 qp->r_nak_state = IB_NAK_PSN_ERROR;
1648 /* Use the expected PSN. */
1649 qp->r_ack_psn = qp->r_psn;
1651 * Wait to send the sequence NAK until all packets
1652 * in the receive queue have been processed.
1653 * Otherwise, we end up propagating congestion.
1655 rc_defered_ack(rcd, qp);
1661 * Handle a duplicate request. Don't re-execute SEND, RDMA
1662 * write or atomic op. Don't NAK errors, just silently drop
1663 * the duplicate request. Note that r_sge, r_len, and
1664 * r_rcv_len may be in use so don't modify them.
1666 * We are supposed to ACK the earliest duplicate PSN but we
1667 * can coalesce an outstanding duplicate ACK. We have to
1668 * send the earliest so that RDMA reads can be restarted at
1669 * the requester's expected PSN.
1671 * First, find where this duplicate PSN falls within the
1672 * ACKs previously sent.
1673 * old_req is true if there is an older response that is scheduled
1674 * to be sent before sending this one.
1678 ibp->rvp.n_rc_dupreq++;
1680 spin_lock_irqsave(&qp->s_lock, flags);
1682 for (i = qp->r_head_ack_queue; ; i = prev) {
1683 if (i == qp->s_tail_ack_queue)
1688 prev = HFI1_MAX_RDMA_ATOMIC;
1689 if (prev == qp->r_head_ack_queue) {
1693 e = &qp->s_ack_queue[prev];
1698 if (cmp_psn(psn, e->psn) >= 0) {
1699 if (prev == qp->s_tail_ack_queue &&
1700 cmp_psn(psn, e->lpsn) <= 0)
1706 case OP(RDMA_READ_REQUEST): {
1707 struct ib_reth *reth;
1712 * If we didn't find the RDMA read request in the ack queue,
1713 * we can ignore this request.
1715 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1717 /* RETH comes after BTH */
1718 reth = &ohdr->u.rc.reth;
1720 * Address range must be a subset of the original
1721 * request and start on pmtu boundaries.
1722 * We reuse the old ack_queue slot since the requester
1723 * should not back up and request an earlier PSN for the same request.
1726 offset = delta_psn(psn, e->psn) * qp->pmtu;
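/*
 * E.g. a duplicate READ whose PSN is 2 past e->psn with a 4096-byte
 * pMTU must restart 8192 bytes into the original range, and its
 * length must cover exactly the rest of that range (checked below).
 */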
1727 len = be32_to_cpu(reth->length);
1728 if (unlikely(offset + len != e->rdma_sge.sge_length))
1730 if (e->rdma_sge.mr) {
1731 rvt_put_mr(e->rdma_sge.mr);
1732 e->rdma_sge.mr = NULL;
1735 u32 rkey = be32_to_cpu(reth->rkey);
1736 u64 vaddr = get_ib_reth_vaddr(reth);
1739 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1740 IB_ACCESS_REMOTE_READ);
1744 e->rdma_sge.vaddr = NULL;
1745 e->rdma_sge.length = 0;
1746 e->rdma_sge.sge_length = 0;
1751 qp->s_tail_ack_queue = prev;
1755 case OP(COMPARE_SWAP):
1756 case OP(FETCH_ADD): {
1758 * If we didn't find the atomic request in the ack queue
1759 * or the send engine is already backed up to send an
1760 * earlier entry, we can ignore this request.
1762 if (!e || e->opcode != (u8)opcode || old_req)
1764 qp->s_tail_ack_queue = prev;
1770 * Ignore this operation if it doesn't request an ACK
1771 * or an earlier RDMA read or atomic is going to be resent.
1773 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1776 * Resend the most recent ACK if this request is
1777 * after all the previous RDMA reads and atomics.
1779 if (i == qp->r_head_ack_queue) {
1780 spin_unlock_irqrestore(&qp->s_lock, flags);
1781 qp->r_nak_state = 0;
1782 qp->r_ack_psn = qp->r_psn - 1;
1787 * Resend the RDMA read or atomic op which
1788 * ACKs this duplicate request.
1790 qp->s_tail_ack_queue = i;
1793 qp->s_ack_state = OP(ACKNOWLEDGE);
1794 qp->s_flags |= RVT_S_RESP_PENDING;
1795 qp->r_nak_state = 0;
1796 hfi1_schedule_send(qp);
1799 spin_unlock_irqrestore(&qp->s_lock, flags);
1807 static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
1812 if (next > HFI1_MAX_RDMA_ATOMIC)
1814 qp->s_tail_ack_queue = next;
1815 qp->s_ack_state = OP(ACKNOWLEDGE);
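/*
 * The ack queue has HFI1_MAX_RDMA_ATOMIC + 1 slots, so indices run
 * 0..HFI1_MAX_RDMA_ATOMIC and wrap back to 0 here; that is also why
 * the bound check above uses > rather than >=.
 */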
1818 static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
1819 u32 lqpn, u32 rqpn, u8 svc_type)
1821 struct opa_hfi1_cong_log_event_internal *cc_event;
1822 unsigned long flags;
1824 if (sl >= OPA_MAX_SLS)
1827 spin_lock_irqsave(&ppd->cc_log_lock, flags);
1829 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
1830 ppd->threshold_event_counter++;
1832 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
1833 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
1834 ppd->cc_log_idx = 0;
1835 cc_event->lqpn = lqpn & RVT_QPN_MASK;
1836 cc_event->rqpn = rqpn & RVT_QPN_MASK;
1838 cc_event->svc_type = svc_type;
1839 cc_event->rlid = rlid;
1840 /* keep timestamp in units of 1.024 usec */
1841 cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
1843 spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
1846 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
1847 u32 rqpn, u8 svc_type)
1849 struct cca_timer *cca_timer;
1850 u16 ccti, ccti_incr, ccti_timer, ccti_limit;
1851 u8 trigger_threshold;
1852 struct cc_state *cc_state;
1853 unsigned long flags;
1855 if (sl >= OPA_MAX_SLS)
1858 cc_state = get_cc_state(ppd);
1864 * 1) increase CCTI (for this SL)
1865 * 2) select IPG (i.e., call set_link_ipg())
1868 ccti_limit = cc_state->cct.ccti_limit;
1869 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
1870 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
1872 cc_state->cong_setting.entries[sl].trigger_threshold;
1874 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
1876 cca_timer = &ppd->cca_timer[sl];
1877 if (cca_timer->ccti < ccti_limit) {
1878 if (cca_timer->ccti + ccti_incr <= ccti_limit)
1879 cca_timer->ccti += ccti_incr;
1881 cca_timer->ccti = ccti_limit;
1885 ccti = cca_timer->ccti;
1887 if (!hrtimer_active(&cca_timer->hrtimer)) {
1888 /* ccti_timer is in units of 1.024 usec */
1889 unsigned long nsec = 1024 * ccti_timer;
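/*
 * One ccti_timer tick is 1.024 usec = 1024 ns, so e.g. ccti_timer = 100
 * gives a 102400 ns (102.4 usec) timer period.
 */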
1891 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
1895 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
1897 if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
1898 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
1902 * hfi1_rc_rcv - process an incoming RC packet
1903 * @rcd: the context pointer
1904 * @hdr: the header of this packet
1905 * @rcv_flags: flags relevant to rcv processing
1906 * @data: the packet data
1907 * @tlen: the packet length
1908 * @qp: the QP for this packet
1910 * This is called from qp_rcv() to process an incoming RC packet
1912 * May be called at interrupt level.
1914 void hfi1_rc_rcv(struct hfi1_packet *packet)
1916 struct hfi1_ctxtdata *rcd = packet->rcd;
1917 struct ib_header *hdr = packet->hdr;
1918 u32 rcv_flags = packet->rcv_flags;
1919 void *data = packet->ebuf;
1920 u32 tlen = packet->tlen;
1921 struct rvt_qp *qp = packet->qp;
1922 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1923 struct ib_other_headers *ohdr = packet->ohdr;
1925 u32 hdrsize = packet->hlen;
1929 u32 pmtu = qp->pmtu;
1931 struct ib_reth *reth;
1932 unsigned long flags;
1933 int ret, is_fecn = 0;
1934 bool copy_last = false;
1937 lockdep_assert_held(&qp->r_lock);
1938 bth0 = be32_to_cpu(ohdr->bth[0]);
1939 if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
1942 is_fecn = process_ecn(qp, packet, false);
1944 psn = be32_to_cpu(ohdr->bth[2]);
1945 opcode = (bth0 >> 24) & 0xff;
1948 * Process responses (ACKs) before anything else. Note that the
1949 * packet sequence number will be for something in the send work
1950 * queue rather than the expected receive packet sequence number.
1951 * In other words, this QP is the requester.
1953 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1954 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1955 rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1956 hdrsize, pmtu, rcd);
1962 /* Compute 24 bits worth of difference. */
1963 diff = delta_psn(psn, qp->r_psn);
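/*
 * delta_psn() presumably sign-extends the 24-bit difference: receiving
 * PSN 0x000001 when r_psn is 0xFFFFFF gives +2 despite the wrap, and a
 * duplicate gives a negative value; both non-zero cases go to
 * rc_rcv_error() below.
 */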
1964 if (unlikely(diff)) {
1965 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1970 /* Check for opcode sequence errors. */
1971 switch (qp->r_state) {
1972 case OP(SEND_FIRST):
1973 case OP(SEND_MIDDLE):
1974 if (opcode == OP(SEND_MIDDLE) ||
1975 opcode == OP(SEND_LAST) ||
1976 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1977 opcode == OP(SEND_LAST_WITH_INVALIDATE))
1981 case OP(RDMA_WRITE_FIRST):
1982 case OP(RDMA_WRITE_MIDDLE):
1983 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1984 opcode == OP(RDMA_WRITE_LAST) ||
1985 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1990 if (opcode == OP(SEND_MIDDLE) ||
1991 opcode == OP(SEND_LAST) ||
1992 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1993 opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
1994 opcode == OP(RDMA_WRITE_MIDDLE) ||
1995 opcode == OP(RDMA_WRITE_LAST) ||
1996 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1999 * Note that it is up to the requester to not send a new
2000 * RDMA read or atomic operation before receiving an ACK
2001 * for the previous operation.
2006 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2009 /* OK, process the packet. */
2011 case OP(SEND_FIRST):
2012 ret = hfi1_rvt_get_rwqe(qp, 0);
2019 case OP(SEND_MIDDLE):
2020 case OP(RDMA_WRITE_MIDDLE):
2022 /* Check for invalid length PMTU or posted rwqe len. */
2023 if (unlikely(tlen != (hdrsize + pmtu + 4)))
2025 qp->r_rcv_len += pmtu;
2026 if (unlikely(qp->r_rcv_len > qp->r_len))
2028 hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
2031 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2033 ret = hfi1_rvt_get_rwqe(qp, 1);
2041 case OP(SEND_ONLY_WITH_IMMEDIATE):
2042 case OP(SEND_ONLY_WITH_INVALIDATE):
2043 ret = hfi1_rvt_get_rwqe(qp, 0);
2049 if (opcode == OP(SEND_ONLY))
2050 goto no_immediate_data;
2051 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
2053 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
2054 case OP(SEND_LAST_WITH_IMMEDIATE):
2056 wc.ex.imm_data = ohdr->u.imm_data;
2057 wc.wc_flags = IB_WC_WITH_IMM;
2059 case OP(SEND_LAST_WITH_INVALIDATE):
2061 rkey = be32_to_cpu(ohdr->u.ieth);
2062 if (rvt_invalidate_rkey(qp, rkey))
2063 goto no_immediate_data;
2064 wc.ex.invalidate_rkey = rkey;
2065 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2067 case OP(RDMA_WRITE_LAST):
2068 copy_last = rvt_is_user_qp(qp);
2075 /* Get the number of bytes the message was padded by. */
2076 pad = (bth0 >> 20) & 3;
2077 /* Check for invalid length. */
2078 /* LAST len should be >= 1 */
2079 if (unlikely(tlen < (hdrsize + pad + 4)))
2081 /* Don't count the CRC. */
2082 tlen -= (hdrsize + pad + 4);
2083 wc.byte_len = tlen + qp->r_rcv_len;
2084 if (unlikely(wc.byte_len > qp->r_len))
2086 hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
2087 rvt_put_ss(&qp->r_sge);
2089 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2091 wc.wr_id = qp->r_wr_id;
2092 wc.status = IB_WC_SUCCESS;
2093 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2094 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2095 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2097 wc.opcode = IB_WC_RECV;
2099 wc.src_qp = qp->remote_qpn;
2100 wc.slid = qp->remote_ah_attr.dlid;
2102 * It seems that IB mandates the presence of an SL in a
2103 * work completion only for the UD transport (see section
2104 * 11.4.2 of IBTA Vol. 1).
2106 * However, the way the SL is chosen below is consistent
2107 * with the way that IB/qib works and tries to avoid
2108 * introducing incompatibilities.
2110 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
2112 wc.sl = qp->remote_ah_attr.sl;
2113 /* zero fields that are N/A */
2116 wc.dlid_path_bits = 0;
2118 /* Signal completion event if the solicited bit is set. */
2119 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
2120 (bth0 & IB_BTH_SOLICITED) != 0);
2123 case OP(RDMA_WRITE_ONLY):
2124 copy_last = rvt_is_user_qp(qp);
2126 case OP(RDMA_WRITE_FIRST):
2127 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2128 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2131 reth = &ohdr->u.rc.reth;
2132 qp->r_len = be32_to_cpu(reth->length);
2134 qp->r_sge.sg_list = NULL;
2135 if (qp->r_len != 0) {
2136 u32 rkey = be32_to_cpu(reth->rkey);
2137 u64 vaddr = get_ib_reth_vaddr(reth);
2140 /* Check rkey & NAK */
2141 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2142 rkey, IB_ACCESS_REMOTE_WRITE);
2145 qp->r_sge.num_sge = 1;
2147 qp->r_sge.num_sge = 0;
2148 qp->r_sge.sge.mr = NULL;
2149 qp->r_sge.sge.vaddr = NULL;
2150 qp->r_sge.sge.length = 0;
2151 qp->r_sge.sge.sge_length = 0;
2153 if (opcode == OP(RDMA_WRITE_FIRST))
2155 else if (opcode == OP(RDMA_WRITE_ONLY))
2156 goto no_immediate_data;
2157 ret = hfi1_rvt_get_rwqe(qp, 1);
2162 wc.ex.imm_data = ohdr->u.rc.imm_data;
2163 wc.wc_flags = IB_WC_WITH_IMM;
2166 case OP(RDMA_READ_REQUEST): {
2167 struct rvt_ack_entry *e;
2171 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2173 next = qp->r_head_ack_queue + 1;
2174 /* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
2175 if (next > HFI1_MAX_RDMA_ATOMIC)
2177 spin_lock_irqsave(&qp->s_lock, flags);
2178 if (unlikely(next == qp->s_tail_ack_queue)) {
2179 if (!qp->s_ack_queue[next].sent)
2180 goto nack_inv_unlck;
2181 update_ack_queue(qp, next);
2183 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2184 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2185 rvt_put_mr(e->rdma_sge.mr);
2186 e->rdma_sge.mr = NULL;
2188 reth = &ohdr->u.rc.reth;
2189 len = be32_to_cpu(reth->length);
2191 u32 rkey = be32_to_cpu(reth->rkey);
2192 u64 vaddr = get_ib_reth_vaddr(reth);
2195 /* Check rkey & NAK */
2196 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2197 rkey, IB_ACCESS_REMOTE_READ);
2199 goto nack_acc_unlck;
2201 * Update the next expected PSN. We add 1 later
2202 * below, so only add the remainder here.
2204 qp->r_psn += rvt_div_mtu(qp, len - 1);
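/*
 * E.g. an 8192-byte read with a 4096-byte pMTU spans two response
 * packets: rvt_div_mtu(qp, 8191) presumably yields 1 here, and the
 * extra +1 mentioned above then moves r_psn past the final response.
 */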
2206 e->rdma_sge.mr = NULL;
2207 e->rdma_sge.vaddr = NULL;
2208 e->rdma_sge.length = 0;
2209 e->rdma_sge.sge_length = 0;
2214 e->lpsn = qp->r_psn;
2216 * We need to increment the MSN here instead of when we
2217 * finish sending the result since a duplicate request would
2218 * increment it more than once.
2222 qp->r_state = opcode;
2223 qp->r_nak_state = 0;
2224 qp->r_head_ack_queue = next;
2226 /* Schedule the send engine. */
2227 qp->s_flags |= RVT_S_RESP_PENDING;
2228 hfi1_schedule_send(qp);
2230 spin_unlock_irqrestore(&qp->s_lock, flags);
2236 case OP(COMPARE_SWAP):
2237 case OP(FETCH_ADD): {
2238 struct ib_atomic_eth *ateth;
2239 struct rvt_ack_entry *e;
2246 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2248 next = qp->r_head_ack_queue + 1;
2249 if (next > HFI1_MAX_RDMA_ATOMIC)
2251 spin_lock_irqsave(&qp->s_lock, flags);
2252 if (unlikely(next == qp->s_tail_ack_queue)) {
2253 if (!qp->s_ack_queue[next].sent)
2254 goto nack_inv_unlck;
2255 update_ack_queue(qp, next);
2257 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2258 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2259 rvt_put_mr(e->rdma_sge.mr);
2260 e->rdma_sge.mr = NULL;
2262 ateth = &ohdr->u.atomic_eth;
2263 vaddr = get_ib_ateth_vaddr(ateth);
2264 if (unlikely(vaddr & (sizeof(u64) - 1)))
2265 goto nack_inv_unlck;
2266 rkey = be32_to_cpu(ateth->rkey);
2267 /* Check rkey & NAK */
2268 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2270 IB_ACCESS_REMOTE_ATOMIC)))
2271 goto nack_acc_unlck;
2272 /* Perform atomic OP and save result. */
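/*
 * Note on the result stored below: for FETCH_ADD the value before the
 * add is kept (atomic64_add_return() gives the new value, so sdata is
 * subtracted back out), and for COMPARE_SWAP cmpxchg() returns the
 * prior memory contents whether or not the swap took place.
 */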
2273 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
2274 sdata = get_ib_ateth_swap(ateth);
2275 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2276 (u64)atomic64_add_return(sdata, maddr) - sdata :
2277 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
2278 get_ib_ateth_compare(ateth),
2280 rvt_put_mr(qp->r_sge.sge.mr);
2281 qp->r_sge.num_sge = 0;
2288 qp->r_state = opcode;
2289 qp->r_nak_state = 0;
2290 qp->r_head_ack_queue = next;
2292 /* Schedule the send engine. */
2293 qp->s_flags |= RVT_S_RESP_PENDING;
2294 hfi1_schedule_send(qp);
2296 spin_unlock_irqrestore(&qp->s_lock, flags);
2303 /* NAK unknown opcodes. */
2307 qp->r_state = opcode;
2308 qp->r_ack_psn = psn;
2309 qp->r_nak_state = 0;
2310 /* Send an ACK if requested or required. */
2311 if (psn & IB_BTH_REQ_ACK) {
2312 struct hfi1_qp_priv *priv = qp->priv;
2314 if (packet->numpkt == 0) {
2318 if (priv->r_adefered >= HFI1_PSN_CREDIT) {
2322 if (unlikely(is_fecn)) {
2327 rc_defered_ack(rcd, qp);
2332 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
2333 qp->r_ack_psn = qp->r_psn;
2334 /* Queue RNR NAK for later */
2335 rc_defered_ack(rcd, qp);
2339 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2340 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2341 qp->r_ack_psn = qp->r_psn;
2342 /* Queue NAK for later */
2343 rc_defered_ack(rcd, qp);
2347 spin_unlock_irqrestore(&qp->s_lock, flags);
2349 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2350 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2351 qp->r_ack_psn = qp->r_psn;
2352 /* Queue NAK for later */
2353 rc_defered_ack(rcd, qp);
2357 spin_unlock_irqrestore(&qp->s_lock, flags);
2359 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2360 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2361 qp->r_ack_psn = qp->r_psn;
2363 hfi1_send_rc_ack(rcd, qp, is_fecn);
2366 void hfi1_rc_hdrerr(
2367 struct hfi1_ctxtdata *rcd,
2368 struct ib_header *hdr,
2372 int has_grh = rcv_flags & HFI1_HAS_GRH;
2373 struct ib_other_headers *ohdr;
2374 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2382 ohdr = &hdr->u.l.oth;
2384 bth0 = be32_to_cpu(ohdr->bth[0]);
2385 if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
2388 psn = be32_to_cpu(ohdr->bth[2]);
2389 opcode = (bth0 >> 24) & 0xff;
2391 /* Only deal with RDMA Writes for now */
2392 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
2393 diff = delta_psn(psn, qp->r_psn);
2394 if (!qp->r_nak_state && diff >= 0) {
2395 ibp->rvp.n_rc_seqnak++;
2396 qp->r_nak_state = IB_NAK_PSN_ERROR;
2397 /* Use the expected PSN. */
2398 qp->r_ack_psn = qp->r_psn;
2400 * Wait to send the sequence
2401 * NAK until all packets
2402 * in the receive queue have been processed.
2404 * Otherwise, we end up
2405 * propagating congestion.
2407 rc_defered_ack(rcd, qp);
2408 } /* Out of sequence NAK */
2409 } /* QP Request NAKs */